# TensorFlow version.
# An alternative prompt that was also tried:
# input_ids = tf.convert_to_tensor([[1, 14, 2232, 26, 1]], dtype=tf.int32)  # the dog is cute
# expected_output_ids = [1, 14, 2232, 26, 1, 567, 26, 32, 149, 149, 149, 149, 149, 149, 149, 149, 149, 149, 149, 149]
# # The dog is nothing is it!!!!!!!!!!!!
# # TODO (PVP): this sentence (and others I tried) does not make much sense; there seems to be a problem with XLM language generation.

input_ids = tf.convert_to_tensor([[14, 447]], dtype=tf.int32)  # the president
expected_output_ids = [
    14,
    447,
    14,
    447,
    14,
    447,
    14,
    447,
    14,
    447,
    14,
    447,
    14,
    447,
    14,
    447,
    14,
    447,
    14,
    447,
]  # "the president" repeated ten times
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
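
# A minimal sketch of how the expected ids above might be verified with greedy decoding.
# The model class (TFXLMWithLMHeadModel), the checkpoint name ("xlm-mlm-en-2048"), and the
# explicit max_length are assumptions; the snippet above does not show which checkpoint or
# generation settings were actually used.
from transformers import TFXLMWithLMHeadModel

model = TFXLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")

# Greedy (do_sample=False) generation keeps the output deterministic so it can be
# compared token by token against expected_output_ids.
output_ids = model.generate(input_ids, do_sample=False, max_length=len(expected_output_ids))
assert output_ids[0].numpy().tolist() == expected_output_ids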
# PyTorch version of the same check.
# An alternative prompt that was also tried:
# input_ids = torch.tensor([[1, 14, 2232, 26, 1]], dtype=torch.long, device=torch_device)  # The dog is cute
# expected_output_ids = [1, 14, 2232, 26, 1, 567, 26, 32, 149, 149, 149, 149, 149, 149, 149, 149, 149, 149, 149, 149]
# # The dog is nothing is it!!!!!!!!!!!!
# # TODO (PVP): this sentence (and others I tried) does not make much sense; there seems to be a problem with XLM language generation.

input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
expected_output_ids = [
    14,
    447,
    14,
    447,
    14,
    447,
    14,
    447,
    14,
    447,
    14,
    447,
    14,
    447,
    14,
    447,
    14,
    447,
    14,
    447,
]  # "the president" repeated ten times
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
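
# A minimal sketch of the PyTorch analogue, again using greedy decoding. The model class
# (XLMWithLMHeadModel), the checkpoint name ("xlm-mlm-en-2048"), and the explicit max_length
# are assumptions not shown in the snippet above; torch_device is taken from the surrounding
# test setup.
from transformers import XLMWithLMHeadModel

model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
model.to(torch_device)

# Greedy generation; move the result back to the CPU before comparing it with the plain
# Python list of expected ids.
output_ids = model.generate(input_ids, do_sample=False, max_length=len(expected_output_ids))
assert output_ids[0].cpu().numpy().tolist() == expected_output_ids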