dataset.py
import torch
from torch.utils.data import Dataset
from datasets import load_dataset


class QADataset(Dataset):
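    """Question-answer pairs packed as fixed-length token sequences for
    next-token (teacher-forcing) language model training."""
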
    def __init__(self, config, tokenizer):
        self.dataset = load_dataset(config.dataset)[config.split]
        # Optionally keep only a fraction of the dataset for training
        n_subset = int(config.model_train_fraction * len(self.dataset))
        self.dataset = self.dataset.select(range(n_subset))
        print(
            f"Loaded dataset of size {len(self.dataset)} with columns {self.dataset.column_names}"
        )
        self.tokenizer = tokenizer
        self.max_length = config.max_len
        # Special token IDs
        self.pad_id = self.tokenizer.token_to_id(config.pad_token)
        self.sep_id = self.tokenizer.token_to_id(config.sep_token)
        self.end_id = self.tokenizer.token_to_id(config.end_token)
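        # token_to_id returns None for tokens missing from the vocabulary;
        # fail fast here rather than erroring later during encoding
        assert None not in (self.pad_id, self.sep_id, self.end_id), (
            "Special tokens not found in the tokenizer vocabulary"
        )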

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, idx):
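        # Each item packs "question [SEP] answer [END]" into one fixed-length
        # sequence and derives teacher-forcing inputs/targets shifted by one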
        question, answer = self.dataset[idx]["question"], self.dataset[idx]["answer"]
        question_ids = self.tokenizer.encode(question).ids
        answer_ids = self.tokenizer.encode(answer).ids
        tokenized_sequence = question_ids + [self.sep_id] + answer_ids + [self.end_id]
        # Pad and/or truncate to exactly max_length + 1 tokens, so that the
        # shifted source/target sequences below each have max_length tokens
        pad_length = self.max_length - len(tokenized_sequence) + 1
        if pad_length < 0:  # Truncate (the answer) but keep the [END] token
            tokenized_sequence = tokenized_sequence[: self.max_length - 1] + [self.end_id]
            pad_length = 1
        tokenized_sequence += [self.pad_id] * pad_length
        tokenized_sequence = torch.tensor(tokenized_sequence)
        # Create source and target sequences by shifting one position
        source_sequence = tokenized_sequence[:-1].clone()
        target_sequence = tokenized_sequence[1:].clone()
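        # source_sequence[i] is the model's input at step i; target_sequence[i]
        # is the token it should predict there (standard next-token objective)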
        # Mask padding positions in the source (True = padding). pad_length can
        # be 0, and a [-0:] slice would cover the whole tensor, so the
        # assignments below are guarded. The source mask is off by one by
        # construction: its first masked position holds the final [END] rather
        # than a pad, which is harmless since no loss is computed at that step.
        key_padding_mask = torch.zeros_like(source_sequence)
        if pad_length > 0:
            key_padding_mask[-pad_length:] = 1
        key_padding_mask = key_padding_mask.bool()
        # Loss mask to ignore padding tokens in the target
        loss_mask = torch.zeros_like(target_sequence)
        if pad_length > 0:
            loss_mask[-pad_length:] = 1
        loss_mask = loss_mask.bool()
        target_sequence[loss_mask] = -100
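        # -100 matches the default ignore_index of nn.CrossEntropyLoss, so a
        # downstream loss such as
        #   loss = F.cross_entropy(logits.transpose(1, 2), target_sequence)
        # ignores the padded positions automatically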
        return {
            "source_sequence": source_sequence,
            "target_sequence": target_sequence,
            "key_padding_mask": key_padding_mask,
        }


if __name__ == "__main__":
    from config import config
    from tokenizers import Tokenizer

    # Sanity check the dataset class
    tokenizer = Tokenizer.from_file(config.tokenizer_filename)
    idx = 1
    config.max_len = 64  # For testing purposes
    dataset = QADataset(config, tokenizer)
    sample = dataset[idx]
    source = sample["source_sequence"]
    target = sample["target_sequence"]
    key_padding_mask = sample["key_padding_mask"]
    print("Source sequence shape:", source.shape)
    print("Target sequence shape:", target.shape)
    print("Key padding mask shape:", key_padding_mask.shape)
    print("Source sequence:", source)
    print("Target sequence:", target)
    print("Key padding mask:", key_padding_mask)
    decoded_source = tokenizer.decode(source.tolist(), skip_special_tokens=False)
    decoded_target = tokenizer.decode(target[target != -100].tolist(), skip_special_tokens=False)
    print("Decoded source sequence:", decoded_source)
    print("Decoded target sequence:", decoded_target)