zhougaofeng / internlm2-math-7B · Commits

Commit ca97c9b4
Authored Jun 11, 2024 by zhougaofeng
Upload New File
Parent: 364b7eb7
Pipeline #1175: canceled

Showing 1 changed file with 47 additions and 0 deletions.

src/llmfactory/model/utils/rope.py (new file, 0 → 100644)
import math
from typing import TYPE_CHECKING

from ...extras.logging import get_logger


if TYPE_CHECKING:
    from transformers import PretrainedConfig

    from ...hparams import ModelArguments


logger = get_logger(__name__)


def configure_rope(config: "PretrainedConfig", model_args: "ModelArguments", is_trainable: bool) -> None:
    if model_args.rope_scaling is None:
        return

    # Models whose config has no `rope_scaling` field cannot be scaled this way.
    if not hasattr(config, "rope_scaling"):
        logger.warning("Current model does not support RoPE scaling.")
        return

    if is_trainable:
        if model_args.rope_scaling == "dynamic":
            logger.warning(
                "Dynamic NTK scaling may not work well with fine-tuning. "
                "See: https://github.com/huggingface/transformers/pull/24653"
            )

        current_max_length = getattr(config, "max_position_embeddings", None)
        if current_max_length and model_args.model_max_length > current_max_length:
            logger.info(
                "Enlarging max model length from {} to {}.".format(current_max_length, model_args.model_max_length)
            )
            setattr(config, "max_position_embeddings", model_args.model_max_length)
            # Scale by the ratio of the target length to the original context, rounded up.
            scaling_factor = float(math.ceil(model_args.model_max_length / current_max_length))
        else:
            logger.warning("Input length is smaller than max length. Consider increasing the input length.")
            scaling_factor = 1.0
    else:
        # For inference-only use, default to a 2x context extension.
        scaling_factor = 2.0

    # Write the scaling config that transformers reads when building the model.
    setattr(config, "rope_scaling", {"type": model_args.rope_scaling, "factor": scaling_factor})
    logger.info(
        "Using {} scaling strategy and setting scaling factor to {}.".format(model_args.rope_scaling, scaling_factor)
    )