Commit 02c00ce6 authored by rprenger's avatar rprenger
Browse files

Had bugs in the fix that I didn't notice until using the big server

parent a33e1b35
...@@ -61,9 +61,10 @@ class MegatronGenerate(Resource): ...@@ -61,9 +61,10 @@ class MegatronGenerate(Resource):
temperature = args.temperature temperature = args.temperature
if "temperature" in request.get_json(): if "temperature" in request.get_json():
temperature = request.get_json()["temperature"] temperature = request.get_json()["temperature"]
if not isinstance(temperature, float) or not \ if not (type(temperature) == int or type(temperature) == float):
0.0 < temperature <= 100.0: return "temperature must be a positive number less than or equal to 100.0"
return "temperature must be a positive float less than or equal to 100.0" if not (0.0 < temperature <= 100.0):
return "temperature must be a positive number less than or equal to 100.0"
add_BOS = False add_BOS = False
if "add_BOS" in request.get_json(): if "add_BOS" in request.get_json():
......
...@@ -185,7 +185,7 @@ def generate(model, sentences=None, tokens_to_generate=0, all_probs=False, tempe ...@@ -185,7 +185,7 @@ def generate(model, sentences=None, tokens_to_generate=0, all_probs=False, tempe
context_tokens_tensor, context_length_tensor = tokenize_batch(sentences, tokens_to_generate, add_BOS) context_tokens_tensor, context_length_tensor = tokenize_batch(sentences, tokens_to_generate, add_BOS)
send_generate_info(context_tokens_tensor, context_length_tensor, tokens_to_generate, all_probs, temperature) send_generate_info(context_tokens_tensor, context_length_tensor, tokens_to_generate, all_probs, temperature)
else: else:
context_length_tensor, context_tokens_tensor, tokens_to_generate, all_probs = receive_generate_info() context_length_tensor, context_tokens_tensor, tokens_to_generate, all_probs, temperature = receive_generate_info()
output = synced_generate(model, context_tokens_tensor, context_length_tensor, tokens_to_generate, all_probs, temperature) output = synced_generate(model, context_tokens_tensor, context_length_tensor, tokens_to_generate, all_probs, temperature)
if output is not None: if output is not None:
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment