Commit 67e8d6e9 authored by wooway777's avatar wooway777
Browse files

issue/208 - adapt to ali ppu

parent 6cc680ba
......@@ -157,6 +157,11 @@ def get_args():
action="store_true",
help="Run cambricon test",
)
parser.add_argument(
"--ali",
action="store_true",
help="Run alippu test",
)
parser.add_argument(
"--model",
type=str,
......@@ -351,6 +356,8 @@ if __name__ == "__main__":
device_str = "cuda"
elif args.cambricon:
device_str = "mlu"
elif args.ali:
device_str = "cuda"
else:
print(
"python examples/bench.py --nvidia --model=~/TinyLlama-1.1B-Chat-v1.0/ --batch-size=2 --tp=1 --input-len=50 --output-len=50"
......
......@@ -47,6 +47,11 @@ def get_args():
action="store_true",
help="Run cambricon test",
)
parser.add_argument(
"--ali",
action="store_true",
help="Run alippu test",
)
parser.add_argument(
"--hygon",
action="store_true",
......@@ -257,11 +262,13 @@ if __name__ == "__main__":
device_str = "cuda"
elif args.cambricon:
device_str = "mlu"
elif args.ali:
device_str = "cuda"
elif args.hygon:
device_str = "cuda"
else:
print(
"Usage: python examples/jiuge.py [--cpu | --nvidia | --metax | --moore | --iluvatar | --cambricon | --hygon] --model_path=<path/to/model_dir>\n"
"Usage: python examples/jiuge.py [--cpu | --nvidia | --metax | --moore | --iluvatar | --cambricon | --ali | --hygon] --model_path=<path/to/model_dir>\n"
"such as, python examples/jiuge.py --nvidia --model_path=~/TinyLlama-1.1B-Chat-v1.0"
)
sys.exit(1)
......
......@@ -487,6 +487,7 @@ def parse_args():
parser.add_argument("--moore", action="store_true", help="Use Moore device")
parser.add_argument("--iluvatar", action="store_true", help="Use Iluvatar device")
parser.add_argument("--cambricon", action="store_true", help="Use Cambricon device")
parser.add_argument("--ali", action="store_true", help="Use Ali PPU device")
parser.add_argument(
"--enable-graph",
action="store_true",
......@@ -520,9 +521,11 @@ def main():
device = "cuda"
elif args.cambricon:
device = "mlu"
elif args.ali:
device = "cuda"
else:
print(
"Usage: python infinilm.server.inference_server [--cpu | --nvidia | --metax | --moore | --iluvatar | --cambricon] "
"Usage: python infinilm.server.inference_server [--cpu | --nvidia | --metax | --moore | --iluvatar | --cambricon | --ali] "
"--model_path=<path/to/model_dir> --max_tokens=MAX_TOKENS --max_batch_size=MAX_BATCH_SIZE"
"\n"
"Example: python infinilm.server.inference_server --nvidia --model_path=/data/shared/models/9G7B_MHA/ "
......
......@@ -860,9 +860,11 @@ def test():
device_type = DeviceType.DEVICE_TYPE_KUNLUN
elif sys.argv[1] == "--hygon":
device_type = DeviceType.DEVICE_TYPE_HYGON
elif sys.argv[1] == "--ali":
device_type = DeviceType.DEVICE_TYPE_ALI
else:
print(
"Usage: python jiuge.py [--cpu | --nvidia| --qy| --cambricon | --ascend | --metax | --moore | --iluvatar | --kunlun | --hygon] <path/to/model_dir> [n_device] [--verbose]"
"Usage: python jiuge.py [--cpu | --nvidia| --qy| --cambricon | --ascend | --metax | --moore | --iluvatar | --kunlun | --hygon | --ali] <path/to/model_dir> [n_device] [--verbose]"
)
sys.exit(1)
......
......@@ -37,6 +37,7 @@ class DeviceType(ctypes.c_int):
DEVICE_TYPE_KUNLUN = 7
DEVICE_TYPE_HYGON = 8
DEVICE_TYPE_QY = 9
DEVICE_TYPE_ALI = 10
class KVCacheCStruct(ctypes.Structure):
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment