Commit 6f5d13e8 authored by liucong

Update code and documentation

parent 62fb7f6b
...
@@ -22,6 +22,8 @@ docker pull image.sourcefind.cn:5000/dcu/admin/base/custom:ort_dcu_1.14.0_migrap
 pip install -r requirement.txt
 ```
+This example uses a GPT-2 model for a classical Chinese poetry generation task. Download the model file from https://pan.baidu.com/s/1KWeoUuakCZ5dualK69qCcw (extraction code: 4pmh) and save GPT2_shici.onnx under the model folder.
 ### Running the Example
 We provide a MIGraphX-based inference script with the following version dependencies:
...
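Before running the inference script, it can be worth checking that the downloaded files sit where the updated script expects them. The sketch below is not part of the repository; it only reuses the `./model` paths, the `vocab_shici.txt` name, and the `migraphx.parse_onnx` call that appear in the script diff further down.

```python
import os
import migraphx

# Paths used by the script updated in this commit (./model next to the script)
model_dir = "./model"
onnx_path = os.path.join(model_dir, "GPT2_shici.onnx")
vocab_path = os.path.join(model_dir, "vocab_shici.txt")

for path in (onnx_path, vocab_path):
    if not os.path.exists(path):
        raise FileNotFoundError(f"missing {path}; download it as described in the README")

# Parsing succeeds only if the ONNX file is intact and MIGraphX is installed correctly
prog = migraphx.parse_onnx(onnx_path, map_input_dims={"input": [1, 1024]})
print("model parsed, parameters:", prog.get_parameter_names())
```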
...
@@ -5,7 +5,7 @@ import migraphx
 # Load the vocabulary
 print("INFO: Complete loading the vocabulary")
-vocab_file = os.path.join('../../../Resource/Models/NLP/GPT2', 'vocab_shici.txt')
+vocab_file = os.path.join('./model', 'vocab_shici.txt')
 tokenizer = BertTokenizerFast(vocab_file, sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]")
 # Set the maximum input shape
@@ -13,7 +13,7 @@ maxInput={"input":[1,1024]}
 # Load the model
 print("INFO: Parsing and compiling the model")
-model = migraphx.parse_onnx("../../../Resource/Models/NLP/GPT2/GPT2_shici.onnx", map_input_dims=maxInput)
+model = migraphx.parse_onnx("./model/GPT2_shici.onnx", map_input_dims=maxInput)
 inputName=model.get_parameter_names()[0]
 inputShape=model.get_parameter_shapes()[inputName].lens()
 print("inputName:{0} \ninputShape:{1}".format(inputName,inputShape))
...
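The hunk above ends before the model is compiled and executed. For context, here is a minimal sketch of how the parsed program could be compiled and used for a single forward pass with the MIGraphX Python API. The "gpu" target name, the int64 input dtype, padding to the static [1, 1024] shape, the output layout, and the prompt handling are assumptions for illustration, not taken from the repository's actual script.

```python
import numpy as np
import migraphx
from transformers import BertTokenizerFast

# Same setup as in the diff above
tokenizer = BertTokenizerFast("./model/vocab_shici.txt",
                              sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]")
maxInput = {"input": [1, 1024]}
model = migraphx.parse_onnx("./model/GPT2_shici.onnx", map_input_dims=maxInput)

# Compile for the GPU/DCU target before running (target name assumed to be "gpu")
model.compile(migraphx.get_target("gpu"))

# Encode a prompt and left-align it in the fixed [1, 1024] input, padding the rest
# (how the real script handles padding and special tokens is not shown in the diff)
prompt_ids = tokenizer.encode("床前明月光", add_special_tokens=False)
ids = np.full((1, 1024), tokenizer.pad_token_id, dtype=np.int64)
ids[0, :len(prompt_ids)] = prompt_ids

# Run the compiled program; older MIGraphX builds may require wrapping the numpy
# array in migraphx.argument(...) before passing it in
outputs = model.run({"input": ids})

# Assume the first output is the logits tensor with layout (batch, seq, vocab)
logits = np.array(outputs[0]).reshape(1, 1024, -1)
next_id = int(np.argmax(logits[0, len(prompt_ids) - 1]))
print("greedy next token:", tokenizer.decode([next_id]))
```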