Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
AutoAWQ
Commits
69d31edc
Commit
69d31edc
authored
Sep 14, 2023
by
Casper
Browse files
Custom data example
parent
84e82744
Changes
1
Hide whitespace changes
Inline
Side-by-side
Showing
1 changed file
with
30 additions
and
0 deletions
+30
-0
examples/quant_custom_data.py
examples/quant_custom_data.py
+30
-0
No files found.
examples/quant_custom_data.py
0 → 100644
View file @
69d31edc
from
datasets
import
load_dataset
from
awq
import
AutoAWQForCausalLM
from
transformers
import
AutoTokenizer
# Hugging Face ID of the source FP16 model and the local output directory
# for the quantized artifacts.
model_path = 'lmsys/vicuna-7b-v1.5'
quant_path = 'vicuna-7b-v1.5-awq'

# AWQ settings: zero-point quantization, 4-bit weights grouped in blocks
# of 128, using the GEMM kernel variant.
quant_config = {
    "zero_point": True,
    "q_group_size": 128,
    "w_bit": 4,
    "version": "GEMM",
}

# Load model
# Pull down the full-precision model and its tokenizer; trust_remote_code
# allows tokenizers that ship custom Python code with the checkpoint.
model = AutoAWQForCausalLM.from_pretrained(model_path)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
def load_dolly():
    """Build a list of calibration texts from databricks-dolly-15k.

    Each record's instruction, context, and response fields are joined
    with newlines into a single string, and the "text" column of the
    mapped dataset is returned as a plain Python list.
    """
    dataset = load_dataset('databricks/databricks-dolly-15k', split="train")

    # concatenate data
    def _merge_fields(sample):
        # Combine the three text fields into one calibration string.
        parts = (sample['instruction'], sample['context'], sample['response'])
        return {"text": '\n'.join(parts)}

    merged = dataset.map(_merge_fields)
    return list(merged["text"])
# Quantize
# Run AWQ calibration with the custom Dolly texts instead of the default
# calibration dataset.
model.quantize(tokenizer, quant_config=quant_config, calib_data=load_dolly())

# Save quantized model
# Persist the quantized weights and the tokenizer files side by side so the
# output directory is directly loadable.
model.save_quantized(quant_path)
tokenizer.save_pretrained(quant_path)

print(f'Model is quantized and saved at "{quant_path}"')
\ No newline at end of file
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment