Unverified Commit a6b4d1ad authored by Sylvain Gugger's avatar Sylvain Gugger
Browse files

Remove print statement

parent 6c134444
...@@ -904,7 +904,6 @@ def _decode_asr(tokenizer, model_outputs, *, return_timestamps, return_language, ...@@ -904,7 +904,6 @@ def _decode_asr(tokenizer, model_outputs, *, return_timestamps, return_language,
if current_tokens: if current_tokens:
previous_tokens.append(current_tokens) previous_tokens.append(current_tokens)
elif not (any(p for p in previous_tokens)): elif not (any(p for p in previous_tokens)):
# print("Flushing previous tokens (END)")
chunk = new_chunk() chunk = new_chunk()
previous_tokens = [] previous_tokens = []
current_tokens = [] current_tokens = []
...@@ -917,7 +916,6 @@ def _decode_asr(tokenizer, model_outputs, *, return_timestamps, return_language, ...@@ -917,7 +916,6 @@ def _decode_asr(tokenizer, model_outputs, *, return_timestamps, return_language,
) )
# Happens when we don't use timestamps # Happens when we don't use timestamps
resolved_tokens = _find_longest_common_sequence(previous_tokens) resolved_tokens = _find_longest_common_sequence(previous_tokens)
# print("Flushing previous tokens (FINAL)")
resolved_text = tokenizer.decode(resolved_tokens) resolved_text = tokenizer.decode(resolved_tokens)
chunk["text"] = resolved_text chunk["text"] = resolved_text
chunks.append(chunk) chunks.append(chunk)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment