"vscode:/vscode.git/clone" did not exist on "b4c18a830a1eeb0ffc646a2a881526092b3b1a22"
Unverified Commit f26a3530 authored by Rishabh Manoj's avatar Rishabh Manoj Committed by GitHub
Browse files

Update pipelines.py

Modified the QA pipeline to consider all features for each example before generating the top-k answers.
The current pipeline takes only one SquadExample, one SquadFeature, one start-logit list, and one end-logit list to retrieve the answer; this is incorrect, because a single SquadExample can produce multiple SquadFeatures.
parent 16ce15ed
...@@ -705,9 +705,16 @@ class QuestionAnsweringPipeline(Pipeline): ...@@ -705,9 +705,16 @@ class QuestionAnsweringPipeline(Pipeline):
# Convert inputs to features # Convert inputs to features
examples = self._args_parser(*texts, **kwargs) examples = self._args_parser(*texts, **kwargs)
features = squad_convert_examples_to_features( features_list = [ squad_convert_examples_to_features(
examples, self.tokenizer, kwargs["max_seq_len"], kwargs["doc_stride"], kwargs["max_question_len"], False [example],
) self.tokenizer,
kwargs["max_seq_len"],
kwargs["doc_stride"],
kwargs["max_question_len"],
False
) for example in examples ]
all_answers = []
for features, example in zip(features_list, examples):
fw_args = self.inputs_for_model([f.__dict__ for f in features]) fw_args = self.inputs_for_model([f.__dict__ for f in features])
# Manage tensor allocation on correct device # Manage tensor allocation on correct device
...@@ -724,7 +731,7 @@ class QuestionAnsweringPipeline(Pipeline): ...@@ -724,7 +731,7 @@ class QuestionAnsweringPipeline(Pipeline):
start, end = start.cpu().numpy(), end.cpu().numpy() start, end = start.cpu().numpy(), end.cpu().numpy()
answers = [] answers = []
for (example, feature, start_, end_) in zip(examples, features, start, end): for (feature, start_, end_) in zip(features, start, end):
# Normalize logits and spans to retrieve the answer # Normalize logits and spans to retrieve the answer
start_ = np.exp(start_) / np.sum(np.exp(start_)) start_ = np.exp(start_) / np.sum(np.exp(start_))
end_ = np.exp(end_) / np.sum(np.exp(end_)) end_ = np.exp(end_) / np.sum(np.exp(end_))
...@@ -751,9 +758,12 @@ class QuestionAnsweringPipeline(Pipeline): ...@@ -751,9 +758,12 @@ class QuestionAnsweringPipeline(Pipeline):
} }
for s, e, score in zip(starts, ends, scores) for s, e, score in zip(starts, ends, scores)
] ]
if len(answers) == 1: answers = sorted(answers, key = lambda x:x['score'], reverse=True)[:kwargs["topk"]]
return answers[0] all_answers+=answers
return answers
if len(all_answers) == 1:
return all_answers[0]
return all_answers
def decode(self, start: np.ndarray, end: np.ndarray, topk: int, max_answer_len: int) -> Tuple: def decode(self, start: np.ndarray, end: np.ndarray, topk: int, max_answer_len: int) -> Tuple:
""" """
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment