# data2.py
import os
import json
import re
import string
import argparse
import pickle
import itertools
import csv
from collections import defaultdict
import numpy as np
from numpy import random
import torch
from torch import tensor
from torch.utils.data import Dataset, TensorDataset, DataLoader, RandomSampler, SequentialSampler
from tqdm import tqdm
from sklearn.cluster import KMeans
from dateutil.parser import ParserError, parse
from number_parser import parse_number
from bart import MyBartModel
from span_utils import preprocess_span_input, preprocess_qpa, dump_pickle, load_pickle
class QAData(object):
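# QAData wraps one split (train/dev/test) of NQ-open or AmbigQA: it loads the raw json,
# retrieves top-k (optionally clustered) passages, concatenates question + passages,
# flattens and encodes answers (with on-disk caches), and builds the torch Dataset used for training/evaluation.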
def __init__(self, logger, args, data_path, dataset_type):
"""[summary]
Args:
logger ([type]): [description]
args ([type]): [description]
data_path ([type]): [description]
dataset_type ([type]): ["train" or "dev"]
Raises:
NotImplementedError: [description]
"""
self.data_path = data_path
# determine is_training status now as dataset_type might be modified later for file accessing
self.is_training = dataset_type == "train"
self.dataset_type = dataset_type
if args.debug:
self.data_path = data_path.replace("train", "dev")
# under debug:
# we don't want to save the train file as dev;
# we load the dev file as train (and simply don't save)
dataset_type_for_file_accessing = "dev"
else:
if args.fine_tune:
logger.info(
"Not AmbigQA test dataset available, using dev dataset")
if not self.is_training:
dataset_type_for_file_accessing = "dev" # fine tuning stage
else:
dataset_type_for_file_accessing = dataset_type
else:
dataset_type_for_file_accessing = dataset_type
# NOTE: self.data is the original data. Not tokenized nor encoded.
with open(self.data_path, "r") as f:
# format example: [ {'id': '-8178292525996414464', 'question': 'big little lies season 2 how many episodes', 'answer': ['seven']}, ..... ]
self.data = json.load(f)
if type(self.data) == dict:
self.data = self.data["data"]
self.processed_data = None
if args.debug:
if not self.is_training:
logger.warn("[DEBUG MODE] Load all dev data")
self.data = self.data[:100]
# else:
# self.data = self.data[100:]
# logger.warn("[DEBUG MODE] Load partial dev data")
# self.data = self.data[:500]
assert type(self.data) == list
assert all(["id" in d for d in self.data]), self.data[0].keys()
if type(self.data[0]["id"]) == int:
for i in range(len(self.data)):
self.data[i]["id"] = str(self.data[i]["id"])
self.index2id = {i: d["id"] for i, d in enumerate(self.data)}
self.id2index = {d["id"]: i for i, d in enumerate(self.data)}
# TODO: correct it back
self.load = True # debug mode also needs load
# self.load = not args.debug # do not load the large tokenized dataset
self.logger = logger
self.args = args
if "test" in self.data_path:
self.data_type = "test"
elif "dev" in self.data_path:
self.data_type = "dev"
elif "train" in self.data_path:
self.data_type = "train"
else:
raise NotImplementedError()
self.max_input_length = self.args.max_input_length
self.tokenizer = None
self.dataset = None
self.dataloader = None
self.cache = None
self.debug = args.debug
self.answer_type = "span" if "extraction" in args.predict_type.lower(
) else "seq" # TODO: condition on args.predict_type
self.dataset_name = None # ambig or nq
self.passages = None
if self.args.passage_clustering: # only needed when using passage clustering
self.clustered_passages_path = "data/clustering_results/AmbigQA_"
postfix = ["top", self.args.top_k_passages, "passages",
self.data_type, "is_training", self.is_training, "rank_threshold", self.args.rank_threshold]
postfix = [str(x) for x in postfix]
postfix = "_".join(postfix)
if self.args.debug:
postfix += "_debug" # it might affect the number of data
self.clustered_passages_path += postfix
# if os.path.exists(self.clustered_passages_path):
# with open(self.clustered_passages_path) as fp:
# self.passages = pickle.load(fp)
self.question_ids = [d["id"] for d in self.data]
# dataset name detection is based on the folder name
if any([n in args.ranking_folder_path for n in ["nq", "nqopen"]]):
ranking_file_name = "nq_"
data_file_n = "nqopen-"
assert any(n in args.data_folder_path for n in ["nq", "nqopen"]) == True,\
"data folder path/ranking_folder_path is wrong"
assert any(n in self.data_path for n in ["nq", "nqopen"]) == True,\
"data path/ranking_folder_path is wrong"
self.dataset_name = "nq"
elif any([n in args.ranking_folder_path for n in ["ambigqa"]]):
ranking_file_name = "ambigqa_"
data_file_n = "ambigqa_" # NOTE: it's for light data only
assert "ambigqa" in args.data_folder_path,\
"data folder path/ranking_folder_path is wrong"
assert "ambigqa" in self.data_path,\
"data path/ranking_folder_path is wrong"
self.dataset_name = "ambig"
else:
self.logger.warn("args.ranking_folder_path: ",
args.ranking_folder_path)
exit()
self.wiki_passage_path = args.passages_path
self.ranking_path = os.path.join(
args.ranking_folder_path, f"{ranking_file_name}{dataset_type_for_file_accessing}.json")
self.data_path = os.path.join(
args.data_folder_path, f"{data_file_n}{dataset_type_for_file_accessing}.json")
self.top_k_passages = args.top_k_passages
self.metric = "EM" if self.dataset_name == "nq" else "F1"
self.sep_token = "<SEP>"
self.spaced_sep_token = " " + self.sep_token + " "
def __len__(self):
return len(self.data)
def decode(self, tokens):
return self.tokenizer.decode(tokens, skip_special_tokens=True, clean_up_tokenization_spaces=True).lower()
def decode_batch(self, tokens):
return [self.decode(_tokens) for _tokens in tokens]
def flatten(self, answers, is_ambig=False):
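# Illustrative example (not from the original) of the non-ambig branch:
# answers = [["USA", "US"], ["Mexico"]] -> new_answers = ["USA", "US", "Mexico"], metadata = [(0, 2), (2, 3)]
# For the ambig branch, metadata is nested per question -> per QA pair -> per group of acceptable answers,
# with each leaf being a (start, end) span into new_answers.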
if not is_ambig:
new_answers, metadata = [], []
for answer in answers:
metadata.append(
(len(new_answers), len(new_answers)+len(answer)))
new_answers += answer
return new_answers, metadata
else:
# sep token id
new_answers, metadata = [], []
# one data entry: [ [singleQA["USA", "US"]], [multipleQA["CA", "Canada"], ["Mexico"] ] ]
# _answers: [] answer for one data entry
# answer: answer for one annotation (singleQA or multipleQA) [ [singleQA["USA", "US"]], [multipleQA["CA", "Canada"], ["Mexico"] ] ]
# _answer: a list of acceptable answers for one question interpretation
for _answers in answers:
assert type(_answers) == list
metadata.append([])
# import pdb; pdb.set_trace()
# _answer: current: a list of acceptable answers: [["US"], ["Canada"]] expect: [["US", "USA"], ["Canada", "CA"]]
for answer in _answers:
metadata[-1].append([])
# current: "United States" expect: ["United States", "USA"]
for _answer in answer:
# one possibility: each singleAnswer qaPair has a list
assert len(_answer) > 0, _answers
assert type(_answer) == list and type(
_answer[0]) == str, _answers
# _answer should be a tuple of one answer
metadata[-1][-1].append((len(new_answers),
len(new_answers)+len(_answer)))
new_answers += _answer
return new_answers, metadata
def load_dataset(self, tokenizer, do_return=False):
logging_prefix = f"[{self.dataset_type} data]\t".upper()
self.tokenizer = tokenizer
# prepare paths and special token ids
# NOTE: Might have bug here
# self.tokenizer.sep_token_id = self.tokenizer.convert_tokens_to_ids([self.sep_token])[0]
# self.tokenizer.sep_token = self.sep_token # set tokenizer sep token make sure masking is working properly
# For example: BartTokenizer -> BartTokenized
postfix = tokenizer.__class__.__name__.replace("zer", "zed")
prepend_question_token = False
if postfix[:2].lower() == "t5": # TODO: find a smarter way to check if it's dataset for T5
prepend_question_token = True
if self.args.augment_k_times == 1:
postfix = [postfix, "max_input_length", self.max_input_length, "top",
self.top_k_passages, "rank_threshold", self.args.rank_threshold, self.answer_type, "is_training", self.is_training] # TODO: can be written more elegantly by using dictionary
else:
postfix = [postfix, "max_input_length", self.max_input_length, "top",
self.top_k_passages, "rank_threshold", self.args.rank_threshold ,self.answer_type, "answers", self.args.augment_k_times, "augmentation", "is_training", self.is_training]
postfix = [str(x) for x in postfix]
postfix = "_".join(postfix)
if self.debug:
postfix += "_debug"
if self.args.passage_clustering:
postfix += "_clustered"
# TODO: decide to delete tokenized path if it's finally not needed
tokenized_path = os.path.join(
"/".join(self.data_path.split("/")[:-2]), "Tokenized",
self.data_path.split("/")[-1].replace(".json", "-{}.json".format(postfix))) # replace .json by a formatted postfix
clustered_passages_path = tokenized_path.replace(
"Tokenized", "Clustered").replace(".json", "_input.p")
wiki_embedding_path = "data/wiki2020embedding"
encoded_input_path = tokenized_path.replace(
"Tokenized", "Encoded").replace(".json", "_input.p")
encoded_answer_path = tokenized_path.replace(
"Tokenized", "Encoded").replace(".json", "_answer.p")
metadata_path = tokenized_path.replace(
"Tokenized", "Encoded").replace(".json", "_metadata.p")
processed_data_path = encoded_input_path.replace("_input", "_data")
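# Illustrative cache layout (the actual paths depend on args and data_path): for
# data_path "data/ambigqa/ambigqa_train.json" the tokenized cache becomes
# "data/Tokenized/ambigqa_train-<postfix>.json", the encoded pickles
# "data/Encoded/ambigqa_train-<postfix>_input.p" / "_answer.p" / "_metadata.p" / "_data.p",
# and the clustered inputs "data/Clustered/ambigqa_train-<postfix>_input.p",
# where <postfix> encodes tokenizer, max_input_length, top_k, rank_threshold, answer type and is_training.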
def safe_remove(file_path):
if os.path.exists(file_path):
os.remove(file_path)
def remove_confirmation_prompt(file_name):
prompt = input(
f"Confirm to remove {file_name}? (y/n) ").lower()
return prompt == "yes" or prompt == "y"
if self.args.retokenize == True:
if remove_confirmation_prompt("tokenization file"):
safe_remove(tokenized_path)
else:
exit()
if self.args.reencode == True:
if remove_confirmation_prompt("encoding files"):
safe_remove(processed_data_path)
# safe_remove(encoded_answer_path)
# safe_remove(metadata_path)
else:
exit()
self.cache = os.path.exists(processed_data_path)
joined_answers_l = []
# load exists cache or pre-process a new one
# General procedure:
# 1. check if pickle cache exists
# 2. if not, check if tokenized data exists
# 3. if not, preprocess(load passages and encode) from scratch
if self.load and self.cache:
self.logger.info(
logging_prefix + f"Found pickle cache, start loading {encoded_input_path}")
if self.answer_type == "seq":
# so we load the encoding (a batch dictionary) and then unpack it into
# input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, \
# metadata, passage_coverage_rate = json.load(f)
question_input, question_metadata, question_ids, answer_input, answer_metadata, joined_answers_l = load_pickle(
encoded_input_path)# , encoded_answer_path, metadata_path)
self.question_ids = question_ids
# import pdb; pdb.set_trace()
input_ids, attention_mask = question_input["input_ids"], question_input["attention_mask"]
decoder_input_ids, decoder_attention_mask = answer_input[
"input_ids"], answer_input["attention_mask"]
if self.dataset_name == "ambig":
for (idx, joined_answers) in enumerate(joined_answers_l):
self.data[idx]["answers"] = joined_answers
# inputs are lists of integers
elif self.answer_type == "span":
d = preprocess_span_input(
encoded_input_path, encoded_answer_path, metadata_path,
self.logger, tokenizer, self.max_input_length, is_training=self.is_training)
input_ids = d["input_ids"]
attention_mask = d["attention_mask"]
token_type_ids = d["token_type_ids"]
start_positions = d["start_positions"]
end_positions = d["end_positions"]
answer_mask = d["answer_mask"]
# Q: input (QA concatenation, y= answer?)
# label is the start and end positions
answer_coverage_rate = d["answer_coverage_rate"]
self.logger.info(
logging_prefix + f"answer coverage rate by passages: {answer_coverage_rate}")
else:
self.logger.warn("wrong answer type")
exit()
else: # pickle cache not found
self.logger.info(logging_prefix +
"Pickle cache not found, start preprocessing...")
if self.load and os.path.exists(tokenized_path): # found tokenized path
self.logger.info(
logging_prefix + "Loading pre-tokenized data from {}".format(tokenized_path))
with open(tokenized_path, "r") as f:
if self.answer_type == "seq":
input_ids, question_metadata, attention_mask, decoder_input_ids, decoder_attention_mask, \
answer_metadata, self.data, passage_coverage_rate = json.load(f)
elif self.answer_type == "span":
input_ids, attention_mask, token_type_ids, start_positions, end_positions, answer_mask, passage_coverage_rate = json.load(
f)
else:
self.logger.warn(logging_prefix +
"Unrecognizable answer type")
exit()
self.logger.info(
logging_prefix + f"Passage kept rate(after truncation): {passage_coverage_rate * 100} %")
else: # tokenized data not found
self.logger.info(
logging_prefix + "Tokenized data not found, start tokenizing...")
self.logger.info(
logging_prefix + "Loading passages...")
# pre-process question list from data
questions = [d["question"] if d["question"].endswith("?") else d["question"]+"?"
for d in self.data]
# NOTE: move code to data2
# import pdb; pdb.set_trace()
# pre-process answer list from data
if self.dataset_name == "ambig":
answers = []
for (idx, data_entry) in enumerate(self.data):
cur_answer = []
# Q: does data_entry have more than one annotation? Or is each answer categorized?
for qa_d in data_entry["annotations"]:
# import pdb
# pdb.set_trace()
if qa_d["type"] == "singleAnswer":
answer_for_one_qa_pair = [list(
set(qa_d["answer"]))] # a list of acceptable answers for one question interpretation
cur_answer.append(
answer_for_one_qa_pair)
elif qa_d["type"] == "multipleQAs":
for pair in qa_d["qaPairs"]:
answer_for_one_qa_pair = [list(
set(pair["answer"]))] # a list of semantic similar answers
cur_answer.append(
answer_for_one_qa_pair)
else:
self.logger.warn("error in qa_d type: ")
exit()
# cur_answer [ [singleQA["USA", "US"]], [multipleQA["CA", "Canada"], ["Mexico"] ] ]
assert type(cur_answer) == list and \
all([type(answer) == list for answer in cur_answer]) and \
all([type(
_a) == str for answer in cur_answer for _answer in answer for _a in _answer])
answers.append(cur_answer)
# if self.answer_type == "span": # ambig span
# answers = []
# for (idx, d) in enumerate(self.data):
# cur_answer = []
# for qa_d in d["annotations"]:
# if qa_d["type"] == "singleAnswer":
# # answers.append(qa_d["answer"])
# cur_answer.extend(qa_d["answer"])
# elif qa_d["type"] == "multipleQAs":
# # answers.append(pair["answer"]) for pair in qa_d["qaPairs"]]
# pair_answers = []
# for pair in qa_d["qaPairs"]:
# pair_answers.extend(pair["answer"])
# cur_answer.extend(pair_answers)
# else:
# self.logger.warn("error in qa_d type: ")
# exit()
# self.data[idx]["answers"] = cur_answer
# # for one question, there is one list of answers
# answers.append(cur_answer)
# elif self.answer_type == "seq": # ambig seq
# answers = []
# for (idx, data_entry) in enumerate(self.data):
# cur_answer = []
# # Q: does data_entry has more than one annotations? Or each answer is categorized
# for qa_d in data_entry["annotations"]:
# import pdb; pdb.set_trace()
# if qa_d["type"] == "singleAnswer":
# answer_for_one_qa_pair = [list(
# set(qa_d["answer"]))] # a list of acceptable answers for one question interpretation
# cur_answer.append(
# answer_for_one_qa_pair)
# elif qa_d["type"] == "multipleQAs":
# for pair in qa_d["qaPairs"]:
# answer_for_one_qa_pair = [list(
# set(pair["answer"]))] # a list of semantic similar answers
# cur_answer.append(
# answer_for_one_qa_pair)
# else:
# self.logger.warn("error in qa_d type: ")
# exit()
# # cur_answer [ [singleQA["USA", "US"]], [multipleQA["CA", "Canada"], ["Mexico"] ] ]
# assert type(cur_answer) == list and \
# all([type(answer) == list for answer in cur_answer]) and \
# all([type(
# _a) == str for answer in cur_answer for _answer in answer for _a in _answer])
# answers.append(cur_answer)
# else:
# raise NotImplementedError()
elif self.dataset_name == "nq":
answers = [d["answer"] for d in self.data]
else:
self.logger.warn(
f"wrong dataset type: {self.dataset_name}")
exit()
# flatten answer list
answers, metadata = self.flatten(
answers, self.dataset_name == "ambig")
if self.args.do_lowercase:
questions = [question.lower() for question in questions]
answers = [answer.lower() for answer in answers]
# answers has been flattened, so it's normal to have more answers than questions
self.logger.info(logging_prefix +
"Start concatenating question and passages ")
def init_top_k_passages():
if self.args.passage_clustering:
self.top_k_passages = 100
self.logger.info(
logging_prefix + "Passage clustering takes all (top 100) passages")
embedding_path = "data/wiki2020embedding/"
self.logger.info(
logging_prefix + "Loading passages embedding...")
passage_embedding = load_passage_embeddings(
embedding_path)
self.passages = topKPassasages(
self.top_k_passages, self.wiki_passage_path, self.ranking_path, self.data_path, passage_embedding=passage_embedding)
else:
self.passages = topKPassasages(
self.top_k_passages, self.wiki_passage_path, self.ranking_path, self.data_path)
if self.answer_type == "seq":
if self.dataset_name == "nq": # nq seq answer
questions = ["<s> " + q for q in questions]
# TODO: add them to arguments
# note that after this questions are actually a concatenation of questions and passages
self.logger.info(logging_prefix + f"Start concatenating question and passages for top {self.top_k_passages} passages")
self.passages = topKPassasages(
self.top_k_passages, self.wiki_passage_path, self.ranking_path, self.data_path)
for i in tqdm(range(len(questions))):
# mark the beginning of passages
questions[i] += " <s> "
# add passage one by one
for p in self.passages.get_passages(i, self.args.top_k_passages):
# format: [CLS] question [SEP] title 1 [SEP] passages
questions[i] += self.spaced_sep_token + \
p["title"] + self.spaced_sep_token + p["text"]
# mark the end of passages
questions[i] += " </s> "
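# Resulting model input per question (illustrative):
# "<s> who won x? <s> <SEP> Title 1 <SEP> passage 1 text <SEP> Title 2 <SEP> passage 2 text ... </s> "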
question_metadata = None
answer_metadata = None
# NOTE: no need to rename
# questions_n_passages = questions # rename
# new_questions = questions # rename
elif self.dataset_name == "ambig": # ambig seq answer
# TODO: add function pre_process in utils.py
if prepend_question_token: # T5
questions = ["<s> question: " +
question for question in questions] # t5 tokenizer doesn't have <s>
else:
questions = ["<s> " + q for q in questions] # Bart
questions = [q + " </s> " for q in questions]
questions_with_clustered_passages = []
# TODO: add them to arguments
# note that after this questions are actually a concatenation of questions and passages
all_qp_concatenation_list = []
self.logger.info(
logging_prefix + f"Start concatenating question and passages for top {self.top_k_passages} passages")
# import pdb; pdb.set_trace()
num_clusters = 0
num_passages = 0
if self.args.passage_clustering and os.path.exists(self.clustered_passages_path): # check if there are clustered passages (only needed for passage clustering)
self.logger.info(
logging_prefix + "Loading clustering results...")
with open(self.clustered_passages_path, "rb") as fp:
clustering_results = pickle.load(fp)
num_clusters = clustering_results["num_clusters"]
num_passages = clustering_results["num_passages"]
num_questions = clustering_results["num_questions"]
questions_n_passages = clustering_results["questions_n_passages"]
else: # no PC, or PC but no clustered passages yet
# load all passages embedding or
init_top_k_passages() # init self.topKpassages
# import pdb; pdb.set_trace()
# concatenate question and passages
self.logger.info(
logging_prefix + "Concatenating questions and passages...")
preprocess_qpa(questions, self.passages, answers,
self.args.top_k_passages,
self.answer_type, self.dataset_name == "ambig", self.args.passage_clustering, False, logging_prefix, self.logger)
if self.args.passage_clustering:
for i in tqdm(range(len(questions))):
clusters_passages, num_cluster_for_question_i, num_passages_for_question_i = self.passages.get_clustered_passages(
i, self.args.rank_threshold) # 2-d list
num_clusters += num_cluster_for_question_i
num_passages += num_passages_for_question_i
# make questions[i] a list; index 0 holds the concatenation of all passages
all_qp_concatenation = questions[i]
# add concatenation of all in the first entry
for p_cluster in clusters_passages:
all_qp_concatenation += " <s> "
for p in p_cluster:
# format: [CLS] question [SEP] title 1 [SEP] passages
all_qp_concatenation += self.spaced_sep_token + \
p["title"] + \
self.spaced_sep_token + p["text"]
all_qp_concatenation += " </s> "
# cluster_qp_concatenation = questions[i]
questions_with_clustered_passages.append([])
questions_with_clustered_passages[i].append(all_qp_concatenation)
for p_cluster in clusters_passages: # it's ordered
cluster_qp_concatenation = questions[i] # reset qp concatenation
cluster_qp_concatenation += " <s> "
start = True
for p in p_cluster:
# format: [CLS] question [SEP] title 1 [SEP] passages
if start:
cluster_qp_concatenation += p["title"] + \
self.spaced_sep_token + p["text"]
else:
cluster_qp_concatenation += self.spaced_sep_token + \
p["title"] + self.spaced_sep_token + p["text"]
start = False
cluster_qp_concatenation += " </s> "
questions_with_clustered_passages[i].append(
cluster_qp_concatenation)
num_questions = len(questions)
clustering_results = dict()
clustering_results["num_clusters"] = num_clusters
clustering_results["num_passages"] = num_passages
clustering_results["num_questions"] = num_questions
clustering_results["questions_n_passages"] = questions_with_clustered_passages
with open(self.clustered_passages_path, "wb") as fp:
pickle.dump(clustering_results, fp)
# use the clustered concatenations as the question-passage inputs
questions_n_passages = questions_with_clustered_passages
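# Layout of each questions_n_passages[i] entry under passage clustering (per the loop above):
# index 0 -> question + all clusters' passages concatenated; index j >= 1 -> question + passages of cluster j only.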
else: # non-clustering
for i in tqdm(range(len(questions))):
questions_n_passages = questions
questions_n_passages[i] += " <s> " # start of passages
# add passage one by one
start = True
# NOTE: get top-k passages (no clustering in this branch)
for p in self.passages.get_passages(i, self.args.top_k_passages):
# format: [CLS] question [SEP] title 1 [SEP] passages
if start:
questions_n_passages[i] += p["title"] + \
self.spaced_sep_token + p["text"]
else:
questions_n_passages[i] += self.spaced_sep_token + \
p["title"] + self.spaced_sep_token + p["text"]
start = False
questions_n_passages[i] += " </s> "
if self.args.passage_clustering:
self.logger.info(
f"Average number of clusters is (better be around 2): {num_clusters/len(questions)}")
self.logger.info(
f"Avg num of passages per cluster: {num_passages/num_clusters}")
def is_answer_in_passages(answer_str, p_str):
"""check the existance of answer in passages by comparing string
Args:
idx ([type]): [description]
"""
return answer_str.lower() in p_str.lower()
def get_p_str(cur_qp, max_qp_length):
qp_ids = self.tokenizer.encode(
cur_qp)[:max_qp_length]
p_ids = qp_ids[qp_ids.index(eos_token_id):]
p_str = self.tokenizer.convert_ids_to_tokens(p_ids)
p_str = self.tokenizer.convert_tokens_to_string(
p_str)
return p_str
def concatenate_answers(answer_sets):
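# Illustrative example (not from the original): answer_sets = [["US", "USA"], ["Canada"]]
# -> itertools.product yields ("US", "Canada"), ("USA", "Canada")
# -> returned strings: ["<s>US<SEP>Canada</s>", "<s>USA<SEP>Canada</s>"]
# (at most max_num_of_answers of them are kept, sampled without replacement)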
joined_answers = [answer for answer in itertools.product(*
answer_sets)]
concatenated_answers = [self.sep_token.join(
answer) for answer in joined_answers]
concatenated_answers = [
"<s>" + answer + "</s>" for answer in concatenated_answers]
# NOTE: add argument, num_k
max_num_of_answers = 100
if len(concatenated_answers) > max_num_of_answers:
rnd_indices = np.random.choice(
len(concatenated_answers), size=max_num_of_answers, replace=False)
concatenated_answers = [concatenated_answers[i]
for i in rnd_indices]
return concatenated_answers
def is_answer_a_date_or_infreq_digit(answer_str):
# example: 2, 3, 4 -> False
# example: july 25 2018 -> True
# example: more than 1 million -> False (treated as a frequent sequence of tokens rather than a date or rare number)
try:
if len(answer_str) < 4: # # example: 2, 3, 4
return False
parse(answer_str)
return True # example: july 25 2018
except ParserError:
answer_str = answer_str.replace(",","")
if parse_number(answer_str) != None: # example: 55,000
return True
return False # example: more than 1 million
except TypeError:
return False
new_questions = []
question_metadata = []
new_answers = []
answer_metadata = []
question_indices = []
eos_token_id = self.tokenizer.eos_token_id
max_qp_length = self.args.max_input_length
# process answers for each QP pair (an answer must be present in the QP pair);
# Q, P and A are processed together because they affect each other in the training setting
num_eliminated_qp = 0
answer_presence_d = defaultdict(lambda: 0)
for idx, (cur_qp, cur_md) in enumerate(zip(questions_n_passages, metadata)):
if self.args.passage_clustering:
cur_qp_str = cur_qp[0] # previously we only checked the whole concatenation
else:
cur_qp_str = cur_qp # for non-PC, it should be normal top k passages
p_str = get_p_str(cur_qp_str, max_qp_length)
# check existence of answers
found_answers_for_one_question = []
for cur_md_for_qa_pair in cur_md:
found_answer_for_qa_pair = []
for start, end in cur_md_for_qa_pair: # iterate acceptable answer (semantically similar answers)
# acceptable answers for one qa pair
answer_for_qa_pair = answers[start:end]
for cur_a_str in answer_for_qa_pair:
if self.is_training and not self.args.debug:
if is_answer_in_passages(cur_a_str, p_str):
found_answer_for_qa_pair.append(
cur_a_str)
else: # in the eval dataset (or any dataset in debug mode), add all answers regardless of their presence in passages
found_answer_for_qa_pair.append(
cur_a_str)
if len(found_answer_for_qa_pair) > 0:
found_answers_for_one_question.append(
list(set(found_answer_for_qa_pair))) # NOTE: remove duplicated answers for one qa pair
if len(found_answers_for_one_question) == 0 and self.is_training:
# in dev mode the length is always > 0 because answers are added regardless of their presence in passages
continue
# NOTE: for regular training mode (no passage clustering), we still add answers for every question
if self.is_training and self.args.passage_clustering: # add answers separately for each cluster in training + passage clustering mode
# is_training -> separate pairs of QP and A
# not is_training -> combine as before
for (cluster_rank, cur_qp_str) in enumerate(cur_qp[1:]):
aug_times = 0
num_date = 0
num_long_answer = 0
found_answers_for_one_qp = []
# check each answer's presence in this cluster's passages
# add the found answers
for cur_md_for_qa_pair in cur_md:
found_answer_for_qa_pair = []
# iterate acceptable answer (semantically similar answers)
for start, end in cur_md_for_qa_pair:
# acceptable answers for one qa pair
answer_for_qa_pair = answers[start:end]
for cur_a_str in answer_for_qa_pair:
# import pdb; pdb.set_trace()
# print("cur_a_str: ", cur_a_str)
# print("is_answer_in_passages: ", is_answer_in_passages(
# cur_a_str, cur_qp_str))
if is_answer_in_passages(cur_a_str, cur_qp_str):
found_answer_for_qa_pair.append(
cur_a_str)
answer_presence_d[cluster_rank] += 1
if is_answer_a_date_or_infreq_digit(cur_a_str):
num_date += 1
if len(cur_a_str.split(" ")) >= 4:
num_long_answer += 1
if len(found_answer_for_qa_pair) > 0:
found_answers_for_one_qp.append(found_answer_for_qa_pair)
# skip adding aligned questions and answers if no answer was found for this QP pair
if len(found_answers_for_one_qp) == 0 and self.is_training:
num_eliminated_qp += 1 # not always eliminated: with some probability we keep the QP pair with an empty answer (see below)
empty_answer_gen_ratio = 0.5
if np.random.rand() < empty_answer_gen_ratio:
empty_answer_str = "<s> </s>"
found_answer_for_qa_pair.append(
empty_answer_str)
found_answers_for_one_qp.append(
found_answer_for_qa_pair)
else:
continue
aug_times += len(found_answers_for_one_qp)
aug_times += num_date
aug_times += num_long_answer
# concatenate qp's answers
cur_answers = concatenate_answers(
found_answers_for_one_qp)
for i in range(aug_times):
# append for each QP passages
answer_start_idx = len(new_answers)
# maintain its 1-D format
new_answers.extend(cur_answers)
answer_end_idx = len(new_answers)
question_start_idx = len(new_questions)
new_questions.append(cur_qp_str)
question_end_idx = len(new_questions)
question_metadata.append(
(question_start_idx, question_end_idx)) # we actually added just a qp pair
answer_metadata.append(
(answer_start_idx, answer_end_idx))
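# At this point question_metadata[-1] and answer_metadata[-1] point at aligned spans in
# new_questions / new_answers: the same clustered QP string was appended aug_times times,
# each copy paired with the full set of concatenated answers found for that cluster.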
else: # add concatenation of answers in eval dataset
joined_answers = [answer for answer in itertools.product(*
found_answers_for_one_question)]
joined_answers_l.append(joined_answers)
concatenated_answers = [self.sep_token.join(
answer) for answer in joined_answers]
concatenated_answers = [
"<s>" + answer + "</s>" for answer in concatenated_answers]
# NOTE: add argument, num_k
max_num_of_answers = 100
if len(concatenated_answers) > max_num_of_answers:
rnd_indices = np.random.choice(
len(concatenated_answers), size=max_num_of_answers, replace=False)
concatenated_answers = [concatenated_answers[i]
for i in rnd_indices]
cur_answers = concatenated_answers
answer_start_idx = len(new_answers)
# maintain its 1-D format
new_answers.extend(cur_answers)
answer_end_idx = len(new_answers)
question_start_idx = len(new_questions)
# rename for some clarity
question_id = self.question_ids[idx]
# import pdb; pdb.set_trace()
if self.args.passage_clustering:
# check cluster passages
new_questions.extend(cur_qp[1:])
question_indices.extend(
[question_id] * len(cur_qp[1:]))
else:
new_questions.append(cur_qp_str) #
question_indices.append(question_id)
# import pdb; pdb.set_trace()
# print("check first new_questions ")
question_end_idx = len(new_questions)
assert len(new_questions) == len(
question_indices), "length should be the same"
# TODO: find a way to save the question ids
question_metadata.append(
(question_start_idx, question_end_idx))
answer_metadata.append(
(answer_start_idx, answer_end_idx))
if self.args.passage_clustering:
# import pdb; pdb.set_trace()
self.logger.info(logging_prefix + f"answer presence counts per cluster rank: {dict(answer_presence_d)}")
self.logger.info(logging_prefix + f"Selected qp ratio: {len(question_metadata)/len(questions_n_passages)}")
self.logger.info(
logging_prefix + f"num_eliminated_qp: {num_eliminated_qp}")
if len(question_indices) != 0:
self.question_ids = question_indices
# print("check question_ids set length")
# import pdb; pdb.set_trace()
questions = new_questions
answers = new_answers
# import pdb; pdb.set_trace()
print("answers example: ", answers[:30])
for (idx, joined_answers) in enumerate(joined_answers_l):
self.data[idx]["answers"] = joined_answers
self.logger.info(
logging_prefix + f"Start encoding questions ({len(questions)}) and answers, this might take a while")
question_input = tokenizer.batch_encode_plus(questions,
pad_to_max_length=True,
max_length=self.args.max_input_length,
truncation=True,
padding=True,
return_overflowing_tokens=True,
verbose=self.args.verbose)
max_answer_length = 30
answer_input = tokenizer.batch_encode_plus(answers,
pad_to_max_length=True,
max_length=max_answer_length,
truncation=True,
padding=True,
verbose=self.args.verbose)
dump_pickle(question_input, question_metadata, self.question_ids, answer_input, answer_metadata, joined_answers_l, encoded_input_path,
)
input_ids, attention_mask = question_input["input_ids"], question_input["attention_mask"]
decoder_input_ids, decoder_attention_mask = answer_input[
"input_ids"], answer_input["attention_mask"]
# if not self.is_training:
# decoder_input_ids= None
# decoder_attention_mask = None
# metadata = None
num_truncated_tokens = abs(sum(
question_input['num_truncated_tokens']))
num_question_ids = sum(
[len(question) for question in question_input['input_ids']])
passage_coverage_rate = num_question_ids / \
(num_truncated_tokens + num_question_ids)
self.logger.info(
logging_prefix + f"Number of truncated tokens: {num_truncated_tokens}")
self.logger.info(
logging_prefix + f"Passage kept rate (after truncation): {passage_coverage_rate * 100} %")
elif self.answer_type == "span":
question_metadata = None
# assume questions = [Q1, Q2]
# answers = [[A1 <SEP> A2], [A3]]
# all titles = [ [T1, T2, ..., T100], [T1, T2, ..., T100] ]
# TODO: add some of these arguments into questions
all_titles = []
all_passages = []
init_top_k_passages()
# for each question, add a list of passages info from reranking results
# all titles and all passages should be a 2-d list
for i in tqdm(range(len(questions))):
cur_titles = []
cur_passages = []
for p in self.passages.get_passages(i, self.args.top_k_passages):
cur_titles.append(p["title"])
cur_passages.append(p["text"])
all_titles.append(cur_titles)
all_passages.append(cur_passages)
self.logger.info(logging_prefix +
"Start preprocessing span input")
d = preprocess_span_input(
encoded_input_path, encoded_answer_path, metadata_path,
self.logger, tokenizer, self.max_input_length,
questions=questions, answers=answers, metadata=metadata, all_titles=all_titles, all_passages=all_passages, is_training=self.is_training)
input_ids = d["input_ids"]
attention_mask = d["attention_mask"]
token_type_ids = d["token_type_ids"]
start_positions = d["start_positions"]
end_positions = d["end_positions"]
answer_mask = d["answer_mask"]
# Q: input (QA concatenation, y= answer?)
# label is the start and end positions
answer_coverage_rate = d["answer_coverage_rate"]
else:
print("Unrecognizable answer type")
exit()
if self.load:
with open(tokenized_path, "w") as fp:
if self.answer_type == "seq":
json.dump([input_ids, question_metadata, attention_mask,
decoder_input_ids, decoder_attention_mask,
answer_metadata, self.data, passage_coverage_rate], fp)
elif self.answer_type == "span":
json.dump([input_ids, attention_mask, token_type_ids, start_positions,
end_positions, answer_mask, answer_coverage_rate], fp)
# loading dataset
if self.answer_type == "seq":
question_metadata = None # as I shift to use question_indices instead
self.dataset = QAGenDataset(input_ids, attention_mask,
decoder_input_ids, decoder_attention_mask,
passage_clustering=self.args.passage_clustering,
question_ids=self.question_ids,
in_metadata=question_metadata, out_metadata=answer_metadata,
is_training=self.is_training)
elif self.answer_type == "span":
# batch size x max_n_answer
list_of_tensors = self.tensorize(
input_ids, attention_mask, token_type_ids, start_positions, end_positions, answer_mask)
self.dataset = TensorDataset(*list_of_tensors)
else:
print("wrong self.answer_type argument")
exit()
self.logger.info(
logging_prefix + "Loaded {} examples from {} data".format(len(self.dataset), self.data_type))
# make sure all questions are included in evaluation mode
# it no longer works for clustered passages
# if not self.is_training:
# assert len(input_ids) == len(self), (len(input_ids), len(self))
self.logger.info("DEV length check has passed")
if do_return: