Unverified Commit 239ace15 authored by Xiaoli Wang's avatar Xiaoli Wang Committed by GitHub
Browse files

Fix TypeError: Object of type int64 is not JSON serializable (#24340)

* Fix TypeError: Object of type int64 is not JSON serializable

* Convert numpy.float64 and numpy.int64 to float and int for json serialization

* Black reformatted examples/pytorch/token-classification/run_ner_no_trainer.py

* make style
parent ac19871c
...@@ -56,8 +56,8 @@ class Seq2seqTrainerTester(TestCasePlus): ...@@ -56,8 +56,8 @@ class Seq2seqTrainerTester(TestCasePlus):
] ]
batch["decoder_attention_mask"] = outputs.attention_mask batch["decoder_attention_mask"] = outputs.attention_mask
assert all([len(x) == 512 for x in inputs.input_ids]) assert all(len(x) == 512 for x in inputs.input_ids)
assert all([len(x) == 128 for x in outputs.input_ids]) assert all(len(x) == 128 for x in outputs.input_ids)
return batch return batch
......
...@@ -362,7 +362,7 @@ def convert_to_localized_md(model_list, localized_model_list, format_str): ...@@ -362,7 +362,7 @@ def convert_to_localized_md(model_list, localized_model_list, format_str):
model_keys = [re.search(r"\*\*\[([^\]]*)", line).groups()[0] for line in model_list.strip().split("\n")] model_keys = [re.search(r"\*\*\[([^\]]*)", line).groups()[0] for line in model_list.strip().split("\n")]
# We exclude keys in localized README not in the main one. # We exclude keys in localized README not in the main one.
readmes_match = not any([k not in model_keys for k in localized_model_index]) readmes_match = not any(k not in model_keys for k in localized_model_index)
localized_model_index = {k: v for k, v in localized_model_index.items() if k in model_keys} localized_model_index = {k: v for k, v in localized_model_index.items() if k in model_keys}
for model in model_list.strip().split("\n"): for model in model_list.strip().split("\n"):
......
...@@ -735,7 +735,7 @@ def build_model(model_arch, tiny_config, output_dir): ...@@ -735,7 +735,7 @@ def build_model(model_arch, tiny_config, output_dir):
tiny_config = copy.deepcopy(tiny_config) tiny_config = copy.deepcopy(tiny_config)
if any([model_arch.__name__.endswith(x) for x in ["ForCausalLM", "LMHeadModel"]]): if any(model_arch.__name__.endswith(x) for x in ["ForCausalLM", "LMHeadModel"]):
tiny_config.is_encoder_decoder = False tiny_config.is_encoder_decoder = False
tiny_config.is_decoder = True tiny_config.is_decoder = True
......
...@@ -428,7 +428,7 @@ def get_module_dependencies(module_fname, cache=None): ...@@ -428,7 +428,7 @@ def get_module_dependencies(module_fname, cache=None):
# So we get the imports from that init then try to find where our objects come from. # So we get the imports from that init then try to find where our objects come from.
new_imported_modules = extract_imports(module, cache=cache) new_imported_modules = extract_imports(module, cache=cache)
for new_module, new_imports in new_imported_modules: for new_module, new_imports in new_imported_modules:
if any([i in new_imports for i in imports]): if any(i in new_imports for i in imports):
if new_module not in dependencies: if new_module not in dependencies:
new_modules.append((new_module, [i for i in new_imports if i in imports])) new_modules.append((new_module, [i for i in new_imports if i in imports]))
imports = [i for i in imports if i not in new_imports] imports = [i for i in imports if i not in new_imports]
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment