Commit 19f50f89 authored by yangql

Update README.md, test_quant.py files

parent 6d26a146
README.md
@@ -37,6 +37,9 @@ cd dist && pip3 install auto_gptq*
#### Quantize the model
Method 1:
Download https://huggingface.co/datasets/llm-wizard/alpaca-gpt4-data-zh
```shell
python test_quant.py
```
...
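For reference, the dataset can also be fetched programmatically instead of via the browser. The sketch below is a minimal example, not part of this commit; it assumes the dataset repo exposes a JSON file named `alpaca_gpt4_data_zh.json` (the name `test_quant.py` expects), which may differ from the actual filename in the repo.

```python
# Minimal sketch: fetch the calibration dataset from the Hugging Face Hub.
# Assumption: the dataset repo contains a file named alpaca_gpt4_data_zh.json,
# matching the path expected by test_quant.py. Adjust the filename if needed.
from huggingface_hub import hf_hub_download

local_path = hf_hub_download(
    repo_id="llm-wizard/alpaca-gpt4-data-zh",
    filename="alpaca_gpt4_data_zh.json",
    repo_type="dataset",
    local_dir=".",  # place it next to test_quant.py, i.e. ./alpaca_gpt4_data_zh.json
)
print(local_path)
```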
test_quant.py
@@ -28,7 +28,7 @@ def gptq():
model = AutoGPTQForCausalLM.from_pretrained(model_path, quantize_config)
# https://huggingface.co/datasets/llm-wizard/alpaca-gpt4-data-zh
- file_path = '../alpaca_gpt4_data_zh.json'
+ file_path = './alpaca_gpt4_data_zh.json'
# file_path = 'oaast_rm_zh.json'
messages = []
with open(file_path, 'r', encoding='UTF-8') as fcc_file:
...
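To make the context around this hunk easier to follow, here is a minimal sketch of a GPTQ quantization flow with auto-gptq. It is not the repository's exact `test_quant.py`: the paths `model_path` and `quantized_path`, the quantization settings, and the use of alpaca-style `instruction`/`input`/`output` fields to build calibration examples are assumptions for illustration.

```python
import json

from transformers import AutoTokenizer
from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig

# Hypothetical paths; substitute the real base model and output directory.
model_path = "path/to/base-model"
quantized_path = "path/to/quantized-model"

# 4-bit GPTQ settings; group_size/desc_act are common defaults, not necessarily the repo's config.
quantize_config = BaseQuantizeConfig(bits=4, group_size=128, desc_act=False)

tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoGPTQForCausalLM.from_pretrained(model_path, quantize_config)

# Build calibration samples from the downloaded JSON
# (alpaca-format instruction/input/output fields assumed).
with open('./alpaca_gpt4_data_zh.json', 'r', encoding='UTF-8') as fcc_file:
    records = json.load(fcc_file)

examples = []
for rec in records[:128]:  # a small calibration subset is usually enough
    text = rec.get("instruction", "") + rec.get("input", "") + rec.get("output", "")
    examples.append(tokenizer(text))

model.quantize(examples)              # run GPTQ calibration
model.save_quantized(quantized_path)  # write the quantized weights
tokenizer.save_pretrained(quantized_path)
```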