wxj / Megatron-LM

Commit 554bb262, authored Oct 19, 2021 by rprenger (parent a3770921)

Code that keeps it from dying when the input prompts are too long
Changes: 4 changed files with 29 additions and 17 deletions (+29 −17)

    megatron/text_generation/api.py          +2   −1
    megatron/text_generation/generation.py   +4   −0
    megatron/text_generation_server.py       +19  −15
    tools/run_text_generation_server.py      +4   −1
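With this commit, a request whose prompt plus tokens_to_generate no longer fits the model's position embeddings gets an error string back instead of killing the server. A minimal client-side sketch of the new behavior; the /api path, the port, and the payload shape are assumptions based on how the Megatron text-generation server is usually run, not part of this diff:

    import json
    import requests

    # Hypothetical request with an over-long prompt; URL and port are assumed.
    resp = requests.put("http://localhost:5000/api",
                        data=json.dumps({"prompts": ["a very long prompt ..."],
                                         "tokens_to_generate": 4096}),
                        headers={"Content-Type": "application/json"})

    # Previously the worker ranks could die on this request; now the server answers:
    #   "Length of prompt + tokens_to_generate longer than allowed"
    print(resp.text)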
megatron/text_generation/api.py

@@ -113,6 +113,7 @@ def generate(model,
     context_tokens_tensor, context_length_tensor = tokenize_prompts(
         prompts=prompts, tokens_to_generate=tokens_to_generate, add_BOS=add_BOS)

     if just_score:
         return score_and_return_on_first_stage(
             model, context_tokens_tensor, context_length_tensor)
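The hunk also shows the just_score early exit that the server change below threads through: tokenize the prompts, then score them and return on the first pipeline stage without sampling anything. A hedged usage sketch; keyword names other than those visible above are assumptions:

    # Assumes generate() from megatron/text_generation/api.py and a loaded model.
    output = generate(model,
                      prompts=["prompt to score"],
                      tokens_to_generate=0,   # nothing to sample when only scoring
                      add_BOS=True,
                      just_score=True)        # hits score_and_return_on_first_stage(...)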
megatron/text_generation/generation.py

@@ -131,6 +131,10 @@ def generate_tokens_probs_and_return_on_first_stage(
     max_sequence_length = tokens.size(1)
     max_sequence_length = min(max_sequence_length, args.max_position_embeddings)
+
+    # If the context is too big, this happens
+    if min_prompt_length >= max_sequence_length:
+        raise ValueError

     # forward step.
     forward_step = ForwardStep(model, batch_size, max_sequence_length)
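This guard is the core of the commit: clamp the generation window to the size of the position-embedding table, and if the shortest prompt in the batch already fills that window, fail fast with ValueError rather than indexing past the embeddings mid-request. A self-contained sketch of the same arithmetic; all concrete numbers are made up for illustration:

    import torch

    max_position_embeddings = 2048                    # model's positional limit (assumed)
    tokens = torch.zeros(4, 2300, dtype=torch.long)   # batch padded to prompt + tokens_to_generate
    lengths = torch.tensor([2100, 2200, 2250, 2300])  # per-sample prompt lengths

    max_sequence_length = tokens.size(1)                                     # 2300
    max_sequence_length = min(max_sequence_length, max_position_embeddings)  # clamped to 2048
    min_prompt_length = lengths.min().item()                                 # 2100

    # Even the shortest prompt already fills the clamped window, so there is
    # no position left to generate into: raise now instead of crashing later.
    if min_prompt_length >= max_sequence_length:
        raise ValueError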
megatron/text_generation_server.py

@@ -36,9 +36,6 @@ class MegatronGenerate(Resource):
     def put(self):
         args = get_args()
-        print("request IP: " + str(request.remote_addr))
-        print(json.dumps(request.get_json()), flush=True)
-        print("current time: ", datetime.datetime.now())

         if not "prompts" in request.get_json():
             return "prompts argument required", 400

@@ -106,7 +103,11 @@ class MegatronGenerate(Resource):
             return "add_BOS must be a boolean value"

         with lock:  # Need to get lock to keep multiple threads from hitting code
+            print("request IP: " + str(request.remote_addr))
+            print(json.dumps(request.get_json()), flush=True)
+            print("start time: ", datetime.datetime.now())
             MegatronGenerate.send_do_generate()  # Tell other ranks we're doing generate
-            response, response_seg, response_logprobs, _ = \
-                generate_and_post_process(
-                    self.model,
+            try:
+                response, response_seg, response_logprobs, _ = \
+                    generate_and_post_process(
+                        self.model,

@@ -119,6 +120,9 @@ class MegatronGenerate(Resource):
-                add_BOS=add_BOS,
-                use_eod_token_for_early_termination=True,
-                just_score=just_score)
+                    add_BOS=add_BOS,
+                    use_eod_token_for_early_termination=True,
+                    just_score=just_score)
+            except ValueError as ve:
+                return "Length of prompt + tokens_to_generate longer than allowed"
+            print("end time: ", datetime.datetime.now())
             return jsonify({"text": response,
                             "segments": response_seg,
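Server-side, the shape of the fix is ordinary Flask-RESTful error handling: the request logging moves under the lock so concurrent requests don't interleave in the log, and the generate call is wrapped so a ValueError becomes an HTTP response rather than a dead process. A stripped-down sketch of the same pattern with every Megatron-specific piece stubbed out; all names here are illustrative, not the real module:

    import datetime
    import threading

    from flask import Flask, jsonify, request
    from flask_restful import Api, Resource

    lock = threading.Lock()

    def run_generation(prompts):
        # Stand-in for generate_and_post_process(); the 2048 limit is assumed.
        if any(len(p.split()) > 2048 for p in prompts):
            raise ValueError
        return ["generated text"] * len(prompts)

    class Generate(Resource):
        def put(self):
            if "prompts" not in request.get_json():
                return "prompts argument required", 400
            with lock:  # serialize access to the single shared model
                print("start time: ", datetime.datetime.now())
                try:
                    texts = run_generation(request.get_json()["prompts"])
                except ValueError:
                    # The worker raised instead of dying; report and stay up.
                    return "Length of prompt + tokens_to_generate longer than allowed"
                print("end time: ", datetime.datetime.now())
            return jsonify({"text": texts})

    app = Flask(__name__)
    Api(app).add_resource(Generate, "/api")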
tools/run_text_generation_server.py

@@ -78,4 +78,7 @@ if __name__ == "__main__":
         choice = torch.cuda.LongTensor(1)
         torch.distributed.broadcast(choice, 0)
         if choice[0].item() == 0:
-            generate_and_post_process(model)
+            try:
+                generate_and_post_process(model)
+            except ValueError as ve:
+                pass
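This last hunk is what actually keeps the job alive: rank 0 raises inside the Flask handler and turns the error into a response, but every other rank hits the same ValueError inside the collective generate call and must swallow it to get back to its wait loop; without the except, one oversized prompt would still take down the whole process group. A sketch of the loop this hunk sits in; the surrounding structure is inferred from the visible context, not copied from the file:

    import torch
    # generate_and_post_process is assumed to come from megatron.text_generation,
    # as in the server file above.

    def worker_loop(model, generate_and_post_process):
        # Non-zero ranks wait for rank 0 to broadcast a "do generate" signal.
        while True:
            choice = torch.cuda.LongTensor(1)
            torch.distributed.broadcast(choice, 0)
            if choice[0].item() == 0:
                try:
                    # Raises ValueError on every rank for an over-long prompt.
                    generate_and_post_process(model)
                except ValueError:
                    pass  # go back to waiting instead of crashing the rank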