"flashlight python bindings are required to use this functionality. Please install from https://github.com/facebookresearch/flashlight/tree/master/bindings/python"
)
LM=object
LMState=object
classW2lDecoder(object):
def __init__(self, args, tgt_dict):
    """Initialize the CTC decoder state shared by all W2l decoders.

    Args:
        args: decoding-option namespace; only ``args.nbest`` (number of
            hypotheses to return) is read here.
        tgt_dict: target-side dictionary; must expose ``index``,
            ``indices``, ``bos`` and ``eos`` — presumably the fairseq
            ``Dictionary`` API (TODO confirm against caller).
    """
    self.tgt_dict = tgt_dict
    self.vocab_size = len(tgt_dict)
    self.nbest = args.nbest

    # Criterion-specific init: this decoder always assumes CTC.
    self.criterion_type = CriterionType.CTC

    # Blank token: use a dedicated "<ctc_blank>" symbol when the
    # dictionary defines one, otherwise fall back to the BOS index.
    if "<ctc_blank>" in tgt_dict.indices:
        self.blank = tgt_dict.index("<ctc_blank>")
    else:
        self.blank = tgt_dict.bos()

    # Silence / word-separator token: prefer "<sep>", then "|",
    # and fall back to EOS when neither symbol is present.
    for sep_symbol in ("<sep>", "|"):
        if sep_symbol in tgt_dict.indices:
            self.silence = tgt_dict.index(sep_symbol)
            break
    else:
        self.silence = tgt_dict.eos()

    # Plain CTC decoding uses no ASG transition matrix.
    self.asg_transitions = None
defgenerate(self,models,sample,**unused):
"""Generate a batch of inferences."""
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder