Commit 80047314 authored by mpatwary's avatar mpatwary
Browse files

removed comments

parent b9fcb7b4
...@@ -88,16 +88,10 @@ class BiEncoderModel(MegatronModule): ...@@ -88,16 +88,10 @@ class BiEncoderModel(MegatronModule):
def set_input_tensor(self, input_tensor):
    """See megatron.model.transformer.set_input_tensor()"""
    # Intentionally a no-op for now: the incoming pipeline tensor is
    # ignored. When model parallelism is enabled this should delegate
    # to the wrapped language model instead, i.e.:
    #   self.language_model.set_input_tensor(input_tensor)
    return
def forward(self, query_tokens, query_attention_mask, query_types, def forward(self, query_tokens, query_attention_mask, query_types,
context_tokens, context_attention_mask, context_types): context_tokens, context_attention_mask, context_types):
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment