Nghengu123 committed on
Commit 931d49a · 1 Parent(s): 7fc3eff

Delete dataset.py

Files changed (1)
  1. dataset.py +0 -67
dataset.py DELETED
@@ -1,67 +0,0 @@
import json
import os

import torch


def read_jsonl(path: str):
    # JSON Lines format: one JSON object per line.
    with open(path) as f:
        data = [json.loads(line) for line in f if line.strip()]
    return data


def get_data(path):
    '''
    If there are many json files to load:
        path_item = os.path.join("data/", f"{path}.jsonl")
        data_item = read_jsonl(path_item)
    '''
    data = read_jsonl(path)

    for ex in data:
        if ex["options"] != "":
            ex.update(Problem=ex["Problem"] + "\n Options: " + ex["options"])
        else:
            ex.update(Problem=ex["Problem"] + "\n" + "The answer to the question is:")
        # ex.update(answer=ex["answer"] + "<|endoftext|>")
        # Update the diagramRef tokenizer method here

    print(f"Loaded {len(data)} examples")
    return data
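# --- Illustration only, not part of the original file: hypothetical records
# in the JSONL layout this code expects. The keys ("Problem", "options",
# "answer") come from get_data above; the values are invented.
_example_with_options = {
    "Problem": "What is 2 + 2?",
    "options": "a) 3 b) 4 c) 5",
    "answer": "b",
}
# get_data would rewrite Problem to "What is 2 + 2?\n Options: a) 3 b) 4 c) 5".
_example_without_options = {
    "Problem": "What is 2 + 2?",
    "options": "",
    "answer": "4",
}
# get_data would rewrite Problem to
# "What is 2 + 2?\nThe answer to the question is:".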
class TestDataset(torch.utils.data.Dataset):
    '''
    tokenizer: a text -> token converter for model input
    data: the list of examples returned by get_data
    '''
    def __init__(self, tokenizer, data, loss_on_prefix=True):
        self.data = data
        self.problems = [ex["Problem"] for ex in self.data]
        self.answer = [ex["answer"] for ex in self.data]
        self.problems = tokenizer(self.problems, padding=False)
        self.answer = tokenizer(self.answer, padding=False)
        self.loss_on_prefix = loss_on_prefix
        # Length of the longest problem+answer pair; every item is padded to it.
        self.max_len_of_items = max(
            len(self.problems["input_ids"][i]) + len(self.answer["input_ids"][i])
            for i in range(len(self.data))
        )
        # print(f"Max tokens: {self.max_len_of_items}")

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        qn_tokens = self.problems["input_ids"][idx]
        answer_tokens = self.answer["input_ids"][idx]
        # Right-pad with token id 0 so every item has the same length.
        pad_tokens = [0] * (self.max_len_of_items - len(qn_tokens) - len(answer_tokens))
        tokens = qn_tokens + answer_tokens + pad_tokens
        # 1 marks tokens the loss should cover: always the answer,
        # the question prefix only if loss_on_prefix is set, never the padding.
        mask = (
            ([int(self.loss_on_prefix)] * len(qn_tokens))
            + ([1] * len(answer_tokens))
            + ([0] * len(pad_tokens))
        )
        tokens = torch.tensor(tokens)
        mask = torch.tensor(mask)
        return dict(input_ids=tokens, attention_mask=mask)
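
For context, a minimal usage sketch, assuming a Hugging Face tokenizer and a plain PyTorch DataLoader; the model name, file path, and batch size below are assumptions, not taken from this repo:

from torch.utils.data import DataLoader
from transformers import AutoTokenizer

# "gpt2" and "data/test.jsonl" are placeholder choices.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
data = get_data("data/test.jsonl")
dataset = TestDataset(tokenizer, data, loss_on_prefix=False)

# Every item is already padded to the same length in __getitem__,
# so the default collate_fn can stack items without a custom collator.
loader = DataLoader(dataset, batch_size=8)
batch = next(iter(loader))
print(batch["input_ids"].shape)       # -> (8, max_len_of_items)
print(batch["attention_mask"].shape)  # -> (8, max_len_of_items)

Padding every item to the global maximum in __getitem__ trades memory for simplicity; dynamic padding per batch would be the usual refinement.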