Unverified Commit 68427c9b authored by Nicolas Patry's avatar Nicolas Patry Committed by GitHub
Browse files

Fixing slow pipeline tests (#14260)

* Fixing slow pipeline tests

* Remove the image-segmentation override.

* Fixing clamping only in training.

* Wav2vec2.

* Remove last mention of `no_grad`.

* Fixing copies.

* Rename.
parent 1a674ce6
...@@ -648,9 +648,10 @@ class DetrEncoderLayer(nn.Module): ...@@ -648,9 +648,10 @@ class DetrEncoderLayer(nn.Module):
hidden_states = residual + hidden_states hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.final_layer_norm(hidden_states)
if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any(): if self.training:
clamp_value = torch.finfo(hidden_states.dtype).max - 1000 if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,) outputs = (hidden_states,)
......
...@@ -947,7 +947,10 @@ class UniSpeechPreTrainedModel(PreTrainedModel): ...@@ -947,7 +947,10 @@ class UniSpeechPreTrainedModel(PreTrainedModel):
return input_lengths return input_lengths
def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor): def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor):
output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long) # Effectively attention_mask.sum(-1), but not inplace to be able to run
# on inference mode.
non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]
output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths).to(torch.long)
batch_size = attention_mask.shape[0] batch_size = attention_mask.shape[0]
attention_mask = torch.zeros( attention_mask = torch.zeros(
......
...@@ -948,7 +948,10 @@ class UniSpeechSatPreTrainedModel(PreTrainedModel): ...@@ -948,7 +948,10 @@ class UniSpeechSatPreTrainedModel(PreTrainedModel):
return input_lengths return input_lengths
def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor): def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor):
output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long) # Effectively attention_mask.sum(-1), but not inplace to be able to run
# on inference mode.
non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]
output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths).to(torch.long)
batch_size = attention_mask.shape[0] batch_size = attention_mask.shape[0]
attention_mask = torch.zeros( attention_mask = torch.zeros(
......
...@@ -989,7 +989,10 @@ class Wav2Vec2PreTrainedModel(PreTrainedModel): ...@@ -989,7 +989,10 @@ class Wav2Vec2PreTrainedModel(PreTrainedModel):
return input_lengths return input_lengths
def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor): def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor):
output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long) # Effectively attention_mask.sum(-1), but not inplace to be able to run
# on inference mode.
non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]
output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths).to(torch.long)
batch_size = attention_mask.shape[0] batch_size = attention_mask.shape[0]
attention_mask = torch.zeros( attention_mask = torch.zeros(
......
...@@ -91,9 +91,6 @@ class ImageSegmentationPipeline(Pipeline): ...@@ -91,9 +91,6 @@ class ImageSegmentationPipeline(Pipeline):
return super().__call__(*args, **kwargs) return super().__call__(*args, **kwargs)
def get_inference_context(self):
return torch.no_grad
def preprocess(self, image): def preprocess(self, image):
image = load_image(image) image = load_image(image)
target_size = torch.IntTensor([[image.height, image.width]]) target_size = torch.IntTensor([[image.height, image.width]])
......
...@@ -93,76 +93,74 @@ class TableQuestionAnsweringPipeline(Pipeline): ...@@ -93,76 +93,74 @@ class TableQuestionAnsweringPipeline(Pipeline):
) )
def batch_inference(self, **inputs): def batch_inference(self, **inputs):
with torch.no_grad(): return self.model(**inputs)
return self.model(**inputs)
def sequential_inference(self, **inputs): def sequential_inference(self, **inputs):
""" """
Inference used for models that need to process sequences in a sequential fashion, like the SQA models which Inference used for models that need to process sequences in a sequential fashion, like the SQA models which
handle conversational query related to a table. handle conversational query related to a table.
""" """
with torch.no_grad(): all_logits = []
all_logits = [] all_aggregations = []
all_aggregations = [] prev_answers = None
prev_answers = None batch_size = inputs["input_ids"].shape[0]
batch_size = inputs["input_ids"].shape[0]
input_ids = inputs["input_ids"].to(self.device)
input_ids = inputs["input_ids"].to(self.device) attention_mask = inputs["attention_mask"].to(self.device)
attention_mask = inputs["attention_mask"].to(self.device) token_type_ids = inputs["token_type_ids"].to(self.device)
token_type_ids = inputs["token_type_ids"].to(self.device) token_type_ids_example = None
token_type_ids_example = None
for index in range(batch_size):
for index in range(batch_size): # If sequences have already been processed, the token type IDs will be created according to the previous
# If sequences have already been processed, the token type IDs will be created according to the previous # answer.
# answer. if prev_answers is not None:
if prev_answers is not None: prev_labels_example = token_type_ids_example[:, 3] # shape (seq_len,)
prev_labels_example = token_type_ids_example[:, 3] # shape (seq_len,) model_labels = np.zeros_like(prev_labels_example.cpu().numpy()) # shape (seq_len,)
model_labels = np.zeros_like(prev_labels_example.cpu().numpy()) # shape (seq_len,)
token_type_ids_example = token_type_ids[index] # shape (seq_len, 7)
for i in range(model_labels.shape[0]):
segment_id = token_type_ids_example[:, 0].tolist()[i]
col_id = token_type_ids_example[:, 1].tolist()[i] - 1
row_id = token_type_ids_example[:, 2].tolist()[i] - 1
if row_id >= 0 and col_id >= 0 and segment_id == 1:
model_labels[i] = int(prev_answers[(col_id, row_id)])
token_type_ids_example[:, 3] = torch.from_numpy(model_labels).type(torch.long).to(self.device)
input_ids_example = input_ids[index]
attention_mask_example = attention_mask[index] # shape (seq_len,)
token_type_ids_example = token_type_ids[index] # shape (seq_len, 7) token_type_ids_example = token_type_ids[index] # shape (seq_len, 7)
outputs = self.model( for i in range(model_labels.shape[0]):
input_ids=input_ids_example.unsqueeze(0), segment_id = token_type_ids_example[:, 0].tolist()[i]
attention_mask=attention_mask_example.unsqueeze(0), col_id = token_type_ids_example[:, 1].tolist()[i] - 1
token_type_ids=token_type_ids_example.unsqueeze(0), row_id = token_type_ids_example[:, 2].tolist()[i] - 1
)
logits = outputs.logits
if self.aggregate: if row_id >= 0 and col_id >= 0 and segment_id == 1:
all_aggregations.append(outputs.logits_aggregation) model_labels[i] = int(prev_answers[(col_id, row_id)])
all_logits.append(logits) token_type_ids_example[:, 3] = torch.from_numpy(model_labels).type(torch.long).to(self.device)
dist_per_token = torch.distributions.Bernoulli(logits=logits) input_ids_example = input_ids[index]
probabilities = dist_per_token.probs * attention_mask_example.type(torch.float32).to( attention_mask_example = attention_mask[index] # shape (seq_len,)
dist_per_token.probs.device token_type_ids_example = token_type_ids[index] # shape (seq_len, 7)
) outputs = self.model(
input_ids=input_ids_example.unsqueeze(0),
attention_mask=attention_mask_example.unsqueeze(0),
token_type_ids=token_type_ids_example.unsqueeze(0),
)
logits = outputs.logits
coords_to_probs = collections.defaultdict(list) if self.aggregate:
for i, p in enumerate(probabilities.squeeze().tolist()): all_aggregations.append(outputs.logits_aggregation)
segment_id = token_type_ids_example[:, 0].tolist()[i]
col = token_type_ids_example[:, 1].tolist()[i] - 1 all_logits.append(logits)
row = token_type_ids_example[:, 2].tolist()[i] - 1
if col >= 0 and row >= 0 and segment_id == 1: dist_per_token = torch.distributions.Bernoulli(logits=logits)
coords_to_probs[(col, row)].append(p) probabilities = dist_per_token.probs * attention_mask_example.type(torch.float32).to(
dist_per_token.probs.device
)
coords_to_probs = collections.defaultdict(list)
for i, p in enumerate(probabilities.squeeze().tolist()):
segment_id = token_type_ids_example[:, 0].tolist()[i]
col = token_type_ids_example[:, 1].tolist()[i] - 1
row = token_type_ids_example[:, 2].tolist()[i] - 1
if col >= 0 and row >= 0 and segment_id == 1:
coords_to_probs[(col, row)].append(p)
prev_answers = {key: np.array(coords_to_probs[key]).mean() > 0.5 for key in coords_to_probs} prev_answers = {key: np.array(coords_to_probs[key]).mean() > 0.5 for key in coords_to_probs}
logits_batch = torch.cat(tuple(all_logits), 0) logits_batch = torch.cat(tuple(all_logits), 0)
return (logits_batch,) if not self.aggregate else (logits_batch, torch.cat(tuple(all_aggregations), 0)) return (logits_batch,) if not self.aggregate else (logits_batch, torch.cat(tuple(all_aggregations), 0))
def __call__(self, *args, **kwargs): def __call__(self, *args, **kwargs):
r""" r"""
......
...@@ -117,7 +117,7 @@ class AudioClassificationPipelineTests(unittest.TestCase, metaclass=PipelineTest ...@@ -117,7 +117,7 @@ class AudioClassificationPipelineTests(unittest.TestCase, metaclass=PipelineTest
self.assertEqual( self.assertEqual(
nested_simplify(output, decimals=4), nested_simplify(output, decimals=4),
[ [
{"score": 0.9809, "label": "go"}, {"score": 0.981, "label": "go"},
{"score": 0.0073, "label": "up"}, {"score": 0.0073, "label": "up"},
{"score": 0.0064, "label": "_unknown_"}, {"score": 0.0064, "label": "_unknown_"},
{"score": 0.0015, "label": "down"}, {"score": 0.0015, "label": "down"},
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment