Unverified Commit 11007392 authored by PanZezhong1725's avatar PanZezhong1725 Committed by GitHub
Browse files

Merge pull request #158 from InfiniTensor/issue/157

issue/157 - add/adjust cambricon support in scripts
parents d0239867 9f1e0cb0
...@@ -135,6 +135,11 @@ def get_args(): ...@@ -135,6 +135,11 @@ def get_args():
action="store_true", action="store_true",
help="Run nvidia test", help="Run nvidia test",
) )
parser.add_argument(
"--cambricon",
action="store_true",
help="Run cambricon test",
)
parser.add_argument( parser.add_argument(
"--model", "--model",
type=str, type=str,
...@@ -268,6 +273,8 @@ if __name__ == "__main__": ...@@ -268,6 +273,8 @@ if __name__ == "__main__":
device_str = "cpu" device_str = "cpu"
elif args.nvidia: elif args.nvidia:
device_str = "cuda" device_str = "cuda"
elif args.cambricon:
device_str = "mlu"
else: else:
print( print(
"python examples/bench.py --nvidia --model=~/TinyLlama-1.1B-Chat-v1.0/ --batch-size=2 --tp=1 --input-len=50 --output-len=50" "python examples/bench.py --nvidia --model=~/TinyLlama-1.1B-Chat-v1.0/ --batch-size=2 --tp=1 --input-len=50 --output-len=50"
......
...@@ -40,6 +40,11 @@ def get_args(): ...@@ -40,6 +40,11 @@ def get_args():
action="store_true", action="store_true",
help="Run iluvatar test", help="Run iluvatar test",
) )
parser.add_argument(
"--cambricon",
action="store_true",
help="Run cambricon test",
)
parser.add_argument( parser.add_argument(
"--model_path", "--model_path",
type=str, type=str,
...@@ -188,6 +193,8 @@ if __name__ == "__main__": ...@@ -188,6 +193,8 @@ if __name__ == "__main__":
device_str = "musa" device_str = "musa"
elif args.iluvatar: elif args.iluvatar:
device_str = "cuda" device_str = "cuda"
elif args.cambricon:
device_str = "mlu"
else: else:
print( print(
"Usage: python examples/jiuge.py [--cpu | --nvidia | --metax | --moore | --iluvatar] --model_path=<path/to/model_dir>\n" "Usage: python examples/jiuge.py [--cpu | --nvidia | --metax | --moore | --iluvatar] --model_path=<path/to/model_dir>\n"
......
...@@ -63,7 +63,7 @@ class InfiniLMBenchmark(BaseBenchmark): ...@@ -63,7 +63,7 @@ class InfiniLMBenchmark(BaseBenchmark):
device_map = { device_map = {
"cpu": "cpu", "cpu": "cpu",
"nvidia": "cuda", "nvidia": "cuda",
"cambricon": "cambricon", "cambricon": "mlu",
"ascend": "ascend", "ascend": "ascend",
"metax": "metax", "metax": "metax",
"moore": "moore", "moore": "moore",
...@@ -210,6 +210,8 @@ class TorchBenchmark(BaseBenchmark): ...@@ -210,6 +210,8 @@ class TorchBenchmark(BaseBenchmark):
self.device = torch.device("cuda") self.device = torch.device("cuda")
elif device_type_str == "cpu": elif device_type_str == "cpu":
self.device = torch.device("cpu") self.device = torch.device("cpu")
elif device_type_str == "cambricon":
self.device = torch.device("mlu")
else: else:
raise ValueError( raise ValueError(
f"Torch backend unsupported device type: {device_type_str}" f"Torch backend unsupported device type: {device_type_str}"
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment