OpenDAS / text-generation-inference / Commits / dbdc587d

Unverified commit dbdc587d, authored May 16, 2023 by OlivierDehaene, committed by GitHub on May 16, 2023
Parent: e71471be

feat(integration-tests): improve comparison and health checks (#336)

Changes: 39
Showing 20 changed files with 2924 additions and 2807 deletions (+2924 -2807)
.github/workflows/build.yaml  +1 -0
clients/python/pyproject.toml  +1 -1
clients/python/tests/test_client.py  +6 -6
clients/python/text_generation/types.py  +1 -1
integration-tests/conftest.py  +186 -11
integration-tests/models/__snapshots__/test_bloom_560m.ambr  +0 -627
integration-tests/models/__snapshots__/test_bloom_560m/test_bloom_560m.json  +128 -0
integration-tests/models/__snapshots__/test_bloom_560m/test_bloom_560m_all_params.json  +98 -0
integration-tests/models/__snapshots__/test_bloom_560m/test_bloom_560m_load.json  +514 -0
integration-tests/models/__snapshots__/test_bloom_560m_sharded.ambr  +0 -542
integration-tests/models/__snapshots__/test_bloom_560m_sharded/test_bloom_560m_sharded.json  +128 -0
integration-tests/models/__snapshots__/test_bloom_560m_sharded/test_bloom_560m_sharded_load.json  +514 -0
integration-tests/models/__snapshots__/test_flash_llama.ambr  +0 -465
integration-tests/models/__snapshots__/test_flash_llama/test_flash_llama.json  +88 -0
integration-tests/models/__snapshots__/test_flash_llama/test_flash_llama_all_params.json  +88 -0
integration-tests/models/__snapshots__/test_flash_llama/test_flash_llama_load.json  +354 -0
integration-tests/models/__snapshots__/test_flash_neox.ambr  +0 -682
integration-tests/models/__snapshots__/test_flash_neox/test_flash_neox.json  +163 -0
integration-tests/models/__snapshots__/test_flash_neox/test_flash_neox_load.json  +654 -0
integration-tests/models/__snapshots__/test_flash_santacoder.ambr  +0 -472
.github/workflows/build.yaml

@@ -10,6 +10,7 @@ on:
   pull_request:
     paths:
       - ".github/workflows/build.yaml"
+      - "integration-tests/**"
       - "server/**"
       - "proto/**"
       - "router/**"
clients/python/pyproject.toml

 [tool.poetry]
 name = "text-generation"
-version = "0.5.1"
+version = "0.5.2"
 description = "Hugging Face Text Generation Python Client"
 license = "Apache-2.0"
 authors = ["Olivier Dehaene <olivier@huggingface.co>"]
clients/python/tests/test_client.py

@@ -16,9 +16,9 @@ def test_generate(flan_t5_xxl_url, hf_headers):
     assert len(response.details.prefill) == 1
     assert response.details.prefill[0] == PrefillToken(id=0, text="<pad>", logprob=None)
     assert len(response.details.tokens) == 1
-    assert response.details.tokens[0] == Token(
-        id=3, text="", logprob=-1.984375, special=False
-    )
+    assert response.details.tokens[0].id == 3
+    assert response.details.tokens[0].text == ""
+    assert not response.details.tokens[0].special


 def test_generate_best_of(flan_t5_xxl_url, hf_headers):

@@ -82,9 +82,9 @@ async def test_generate_async(flan_t5_xxl_url, hf_headers):
     assert len(response.details.prefill) == 1
     assert response.details.prefill[0] == PrefillToken(id=0, text="<pad>", logprob=None)
     assert len(response.details.tokens) == 1
-    assert response.details.tokens[0] == Token(
-        id=3, text="", logprob=-1.984375, special=False
-    )
+    assert response.details.tokens[0].id == 3
+    assert response.details.tokens[0].text == ""
+    assert not response.details.tokens[0].special


 @pytest.mark.asyncio
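Note on the change above: the old assertion compared the whole Token model, which pins the test to one exact logprob, while the new per-field assertions drop that brittle comparison. A minimal sketch of the difference, with illustrative logprob values that are not taken from a real server response:

from text_generation.types import Token

# Whole-model equality fails as soon as the logprob drifts slightly:
expected = Token(id=3, text="", logprob=-1.984375, special=False)
observed = Token(id=3, text="", logprob=-1.9821, special=False)
assert observed != expected  # pydantic models compare every field, including logprob

# Field-level assertions, as in the updated test, ignore the noisy field:
assert observed.id == 3
assert observed.text == ""
assert not observed.special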
clients/python/text_generation/types.py

@@ -154,7 +154,7 @@ class Token(BaseModel):

 # Generation finish reason
-class FinishReason(Enum):
+class FinishReason(str, Enum):
     # number of generated tokens == `max_new_tokens`
     Length = "length"
     # the model generated its end of sequence token
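Why mix str into the enum: a plain Enum member is not equal to its own value, while a str-subclass enum compares and JSON-serializes as the underlying string, which is what the new JSON snapshots (e.g. "finish_reason": "length") and the comparison code rely on. A small illustrative sketch; the two class names below are made up for the example:

import json
from enum import Enum


class PlainFinishReason(Enum):
    Length = "length"


class StrFinishReason(str, Enum):
    Length = "length"


# A plain Enum member does not compare equal to its string value:
assert PlainFinishReason.Length != "length"

# With the str mixin it does, and it serializes as a bare string:
assert StrFinishReason.Length == "length"
assert json.dumps(StrFinishReason.Length) == '"length"'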
integration-tests/conftest.py

@@ -4,22 +4,192 @@ import pytest
 import asyncio
 import os
 import docker
+import json
+import math
+import time

 from docker.errors import NotFound
-from typing import Optional, List
-from syrupy.filters import props
+from typing import Optional, List, Dict
+from syrupy.extensions.json import JSONSnapshotExtension
+from aiohttp import ClientConnectorError, ClientOSError, ServerDisconnectedError

 from text_generation import AsyncClient
-from text_generation.types import Response
+from text_generation.types import Response, Details, PrefillToken, Token, BestOfSequence

 DOCKER_IMAGE = os.getenv("DOCKER_IMAGE", None)
 HUGGING_FACE_HUB_TOKEN = os.getenv("HUGGING_FACE_HUB_TOKEN", None)
 DOCKER_VOLUME = os.getenv("DOCKER_VOLUME", "/data")
+
+
+class ResponseComparator(JSONSnapshotExtension):
+    def serialize(
+        self,
+        data,
+        *,
+        exclude=None,
+        matcher=None,
+    ):
+        if isinstance(data, List):
+            data = [d.dict() for d in data]
+
+        data = self._filter(
+            data=data, depth=0, path=(), exclude=exclude, matcher=matcher
+        )
+        return json.dumps(data, indent=2, ensure_ascii=False, sort_keys=False) + "\n"
+
+    def matches(
+        self,
+        *,
+        serialized_data,
+        snapshot_data,
+    ) -> bool:
+        def convert_data(data):
+            data = json.loads(data)
+
+            if isinstance(data, Dict):
+                return Response(**data)
+            if isinstance(data, List):
+                return [Response(**d) for d in data]
+            raise NotImplementedError
+
+        def eq_token(token: Token, other: Token) -> bool:
+            return (
+                token.id == other.id
+                and token.text == other.text
+                and math.isclose(token.logprob, other.logprob, rel_tol=0.2)
+                and token.special == other.special
+            )
+
+        def eq_prefill_token(prefill_token: PrefillToken, other: PrefillToken) -> bool:
+            try:
+                return (
+                    prefill_token.id == other.id
+                    and prefill_token.text == other.text
+                    and (
+                        math.isclose(prefill_token.logprob, other.logprob, rel_tol=0.2)
+                        if prefill_token.logprob is not None
+                        else prefill_token.logprob == other.logprob
+                    )
+                )
+            except TypeError:
+                return False
+
+        def eq_best_of(details: BestOfSequence, other: BestOfSequence) -> bool:
+            return (
+                details.finish_reason == other.finish_reason
+                and details.generated_tokens == other.generated_tokens
+                and details.seed == other.seed
+                and len(details.prefill) == len(other.prefill)
+                and all(
+                    [eq_prefill_token(d, o) for d, o in zip(details.prefill, other.prefill)]
+                )
+                and len(details.tokens) == len(other.tokens)
+                and all([eq_token(d, o) for d, o in zip(details.tokens, other.tokens)])
+            )
+
+        def eq_details(details: Details, other: Details) -> bool:
+            return (
+                details.finish_reason == other.finish_reason
+                and details.generated_tokens == other.generated_tokens
+                and details.seed == other.seed
+                and len(details.prefill) == len(other.prefill)
+                and all(
+                    [eq_prefill_token(d, o) for d, o in zip(details.prefill, other.prefill)]
+                )
+                and len(details.tokens) == len(other.tokens)
+                and all([eq_token(d, o) for d, o in zip(details.tokens, other.tokens)])
+                and (
+                    len(details.best_of_sequences)
+                    if details.best_of_sequences is not None
+                    else 0
+                )
+                == (
+                    len(other.best_of_sequences)
+                    if other.best_of_sequences is not None
+                    else 0
+                )
+                and (
+                    all(
+                        [
+                            eq_best_of(d, o)
+                            for d, o in zip(
+                                details.best_of_sequences, other.best_of_sequences
+                            )
+                        ]
+                    )
+                    if details.best_of_sequences is not None
+                    else details.best_of_sequences == other.best_of_sequences
+                )
+            )
+
+        def eq_response(response: Response, other: Response) -> bool:
+            return response.generated_text == other.generated_text and eq_details(
+                response.details, other.details
+            )
+
+        serialized_data = convert_data(serialized_data)
+        snapshot_data = convert_data(snapshot_data)
+
+        if not isinstance(serialized_data, List):
+            serialized_data = [serialized_data]
+        if not isinstance(snapshot_data, List):
+            snapshot_data = [snapshot_data]
+
+        return len(snapshot_data) == len(serialized_data) and all(
+            [eq_response(r, o) for r, o in zip(serialized_data, snapshot_data)]
+        )
+
+
+class LauncherHandle:
+    def __init__(self, port: int):
+        self.client = AsyncClient(f"http://localhost:{port}")
+
+    def _inner_health(self):
+        raise NotImplementedError
+
+    async def health(self, timeout: int = 60):
+        assert timeout > 0
+        for _ in range(timeout):
+            if not self._inner_health():
+                raise RuntimeError("Launcher crashed")
+
+            try:
+                await self.client.generate("test")
+                return
+            except (ClientConnectorError, ClientOSError, ServerDisconnectedError) as e:
+                time.sleep(1)
+        raise RuntimeError("Health check failed")
+
+
+class ContainerLauncherHandle(LauncherHandle):
+    def __init__(self, docker_client, container_name, port: int):
+        super(ContainerLauncherHandle, self).__init__(port)
+        self.docker_client = docker_client
+        self.container_name = container_name
+
+    def _inner_health(self) -> bool:
+        container = self.docker_client.containers.get(self.container_name)
+        return container.status in ["running", "created"]
+
+
+class ProcessLauncherHandle(LauncherHandle):
+    def __init__(self, process, port: int):
+        super(ProcessLauncherHandle, self).__init__(port)
+        self.process = process
+
+    def _inner_health(self) -> bool:
+        return self.process.poll() is None


 @pytest.fixture
-def snapshot_test(snapshot):
-    return lambda value: value == snapshot(exclude=props("logprob"))
+def response_snapshot(snapshot):
+    return snapshot.use_extension(ResponseComparator)


 @pytest.fixture(scope="module")

@@ -60,7 +230,7 @@ def launcher(event_loop):
         with subprocess.Popen(
             args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
         ) as process:
-            yield AsyncClient(f"http://localhost:{port}")
+            yield ProcessLauncherHandle(process, port)

             process.terminate()
             process.wait(60)

@@ -110,7 +280,7 @@ def launcher(event_loop):
             command=args,
             name=container_name,
             environment=env,
-            auto_remove=True,
+            auto_remove=False,
             detach=True,
             device_requests=[
                 docker.types.DeviceRequest(count=gpu_count, capabilities=[["gpu"]])

@@ -119,13 +289,19 @@ def launcher(event_loop):
             ports={"80/tcp": port},
         )

-        yield AsyncClient(f"http://localhost:{port}")
+        yield ContainerLauncherHandle(client, container.name, port)

-        container.stop()
+        try:
+            container.stop()
+            container.wait()
+        except NotFound:
+            pass

         container_output = container.logs().decode("utf-8")
         print(container_output)

+        container.remove()
+
     if DOCKER_IMAGE is not None:
         return docker_launcher
     return local_launcher

@@ -140,7 +316,6 @@ def generate_load():
             client.generate(prompt, max_new_tokens=max_new_tokens) for _ in range(n)
         ]
-        results = await asyncio.gather(*futures)
-        return [r.dict() for r in results]
+        return await asyncio.gather(*futures)

     return generate_load_inner
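How these pieces fit together: launcher yields a ProcessLauncherHandle or ContainerLauncherHandle, health() polls the process or container and retries a test generation until the server answers, and response_snapshot compares results against the JSON snapshot files added below through ResponseComparator. Below is a minimal sketch of a model test consuming those fixtures; the module name, fixture names and model id are chosen for illustration only, and the real test files under integration-tests/models/ are not reproduced here:

# integration-tests/models/test_example.py  (illustrative sketch only)
import pytest


@pytest.fixture(scope="module")
def example_handle(launcher):
    # launcher() is used as a context manager; it yields a LauncherHandle subclass
    with launcher("bigscience/bloom-560m") as handle:
        yield handle


@pytest.fixture(scope="module")
async def example_client(example_handle):
    # Block until the first /generate call succeeds (or fail after ~60 s)
    await example_handle.health(60)
    return example_handle.client


@pytest.mark.asyncio
async def test_example(example_client, response_snapshot):
    response = await example_client.generate("Test request", max_new_tokens=10)

    assert response.details.generated_tokens == 10
    # ResponseComparator checks ids, texts and finish reasons exactly, and
    # logprobs with math.isclose(rel_tol=0.2), against the stored snapshot.
    assert response == response_snapshot


@pytest.mark.asyncio
async def test_example_load(example_client, generate_load, response_snapshot):
    responses = await generate_load(
        example_client, "Test request", max_new_tokens=10, n=4
    )

    assert len(responses) == 4
    assert responses == response_snapshot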
integration-tests/models/__snapshots__/test_bloom_560m.ambr  (deleted, mode 100644 → 0)

# serializer version: 1
# name: test_bloom_560m
dict({
'details': dict({
'best_of_sequences': None,
'finish_reason': <FinishReason.Length: 'length'>,
'generated_tokens': 10,
'prefill': list([
dict({
'id': 17934,
'text': 'Pour',
}),
dict({
'id': 49833,
'text': ' dég',
}),
dict({
'id': 21543,
'text': 'uster',
}),
dict({
'id': 447,
'text': ' un',
}),
dict({
'id': 46341,
'text': ' ort',
}),
dict({
'id': 35567,
'text': 'olan',
}),
dict({
'id': 15,
'text': ',',
}),
dict({
'id': 1669,
'text': ' il',
}),
dict({
'id': 11580,
'text': ' faut',
}),
dict({
'id': 3913,
'text': ' tout',
}),
dict({
'id': 39261,
'text': " d'abord",
}),
]),
'seed': 0,
'tokens': list([
dict({
'id': 578,
'special': False,
'text': ' le',
}),
dict({
'id': 5608,
'special': False,
'text': ' faire',
}),
dict({
'id': 159570,
'special': False,
'text': ' réch',
}),
dict({
'id': 810,
'special': False,
'text': 'au',
}),
dict({
'id': 12736,
'special': False,
'text': 'ffer',
}),
dict({
'id': 1742,
'special': False,
'text': ' au',
}),
dict({
'id': 6105,
'special': False,
'text': ' bain',
}),
dict({
'id': 88254,
'special': False,
'text': '-mar',
}),
dict({
'id': 641,
'special': False,
'text': 'ie',
}),
dict({
'id': 2940,
'special': False,
'text': ' avec',
}),
]),
}),
'generated_text': ' le faire réchauffer au bain-marie avec',
})
# ---
# name: test_bloom_560m_all_params
dict({
'details': dict({
'best_of_sequences': None,
'finish_reason': <FinishReason.Length: 'length'>,
'generated_tokens': 10,
'prefill': list([
dict({
'id': 15,
'text': ',',
}),
dict({
'id': 1669,
'text': ' il',
}),
dict({
'id': 11580,
'text': ' faut',
}),
dict({
'id': 3913,
'text': ' tout',
}),
dict({
'id': 39261,
'text': " d'abord",
}),
]),
'seed': 0,
'tokens': list([
dict({
'id': 408,
'special': False,
'text': ' que',
}),
dict({
'id': 20288,
'special': False,
'text': " l'on",
}),
dict({
'id': 22255,
'special': False,
'text': ' trouve',
}),
dict({
'id': 1622,
'special': False,
'text': ' une',
}),
dict({
'id': 187079,
'special': False,
'text': ' posture',
}),
dict({
'id': 501,
'special': False,
'text': ' par',
}),
dict({
'id': 8741,
'special': False,
'text': ' rapport',
}),
dict({
'id': 693,
'special': False,
'text': ' à',
}),
dict({
'id': 366,
'special': False,
'text': ' la',
}),
dict({
'id': 36503,
'special': False,
'text': ' pratique',
}),
]),
}),
'generated_text': "Pour déguster un ortolan, il faut tout d'abord que l'on trouve une posture par rapport à la pratique",
})
# ---
# name: test_bloom_560m_load
list([
dict({
'details': dict({
'best_of_sequences': None,
'finish_reason': <FinishReason.Length: 'length'>,
'generated_tokens': 10,
'prefill': list([
dict({
'id': 17934,
'text': 'Pour',
}),
dict({
'id': 49833,
'text': ' dég',
}),
dict({
'id': 21543,
'text': 'uster',
}),
dict({
'id': 447,
'text': ' un',
}),
dict({
'id': 46341,
'text': ' ort',
}),
dict({
'id': 35567,
'text': 'olan',
}),
dict({
'id': 15,
'text': ',',
}),
dict({
'id': 1669,
'text': ' il',
}),
dict({
'id': 11580,
'text': ' faut',
}),
dict({
'id': 3913,
'text': ' tout',
}),
dict({
'id': 39261,
'text': " d'abord",
}),
]),
'seed': None,
'tokens': list([
dict({
'id': 578,
'special': False,
'text': ' le',
}),
dict({
'id': 5608,
'special': False,
'text': ' faire',
}),
dict({
'id': 1767,
'special': False,
'text': ' cu',
}),
dict({
'id': 1273,
'special': False,
'text': 'ire',
}),
dict({
'id': 1486,
'special': False,
'text': ' dans',
}),
dict({
'id': 283,
'special': False,
'text': ' de',
}),
dict({
'id': 40410,
'special': False,
'text': " l'eau",
}),
dict({
'id': 20226,
'special': False,
'text': ' bou',
}),
dict({
'id': 172483,
'special': False,
'text': 'illante',
}),
dict({
'id': 2805,
'special': False,
'text': ' sal',
}),
]),
}),
'generated_text': " le faire cuire dans de l'eau bouillante sal",
}),
dict({
'details': dict({
'best_of_sequences': None,
'finish_reason': <FinishReason.Length: 'length'>,
'generated_tokens': 10,
'prefill': list([
dict({
'id': 17934,
'text': 'Pour',
}),
dict({
'id': 49833,
'text': ' dég',
}),
dict({
'id': 21543,
'text': 'uster',
}),
dict({
'id': 447,
'text': ' un',
}),
dict({
'id': 46341,
'text': ' ort',
}),
dict({
'id': 35567,
'text': 'olan',
}),
dict({
'id': 15,
'text': ',',
}),
dict({
'id': 1669,
'text': ' il',
}),
dict({
'id': 11580,
'text': ' faut',
}),
dict({
'id': 3913,
'text': ' tout',
}),
dict({
'id': 39261,
'text': " d'abord",
}),
]),
'seed': None,
'tokens': list([
dict({
'id': 578,
'special': False,
'text': ' le',
}),
dict({
'id': 5608,
'special': False,
'text': ' faire',
}),
dict({
'id': 1767,
'special': False,
'text': ' cu',
}),
dict({
'id': 1273,
'special': False,
'text': 'ire',
}),
dict({
'id': 1486,
'special': False,
'text': ' dans',
}),
dict({
'id': 283,
'special': False,
'text': ' de',
}),
dict({
'id': 40410,
'special': False,
'text': " l'eau",
}),
dict({
'id': 20226,
'special': False,
'text': ' bou',
}),
dict({
'id': 172483,
'special': False,
'text': 'illante',
}),
dict({
'id': 2805,
'special': False,
'text': ' sal',
}),
]),
}),
'generated_text': " le faire cuire dans de l'eau bouillante sal",
}),
dict({
'details': dict({
'best_of_sequences': None,
'finish_reason': <FinishReason.Length: 'length'>,
'generated_tokens': 10,
'prefill': list([
dict({
'id': 17934,
'text': 'Pour',
}),
dict({
'id': 49833,
'text': ' dég',
}),
dict({
'id': 21543,
'text': 'uster',
}),
dict({
'id': 447,
'text': ' un',
}),
dict({
'id': 46341,
'text': ' ort',
}),
dict({
'id': 35567,
'text': 'olan',
}),
dict({
'id': 15,
'text': ',',
}),
dict({
'id': 1669,
'text': ' il',
}),
dict({
'id': 11580,
'text': ' faut',
}),
dict({
'id': 3913,
'text': ' tout',
}),
dict({
'id': 39261,
'text': " d'abord",
}),
]),
'seed': None,
'tokens': list([
dict({
'id': 578,
'special': False,
'text': ' le',
}),
dict({
'id': 5608,
'special': False,
'text': ' faire',
}),
dict({
'id': 1767,
'special': False,
'text': ' cu',
}),
dict({
'id': 1273,
'special': False,
'text': 'ire',
}),
dict({
'id': 1486,
'special': False,
'text': ' dans',
}),
dict({
'id': 283,
'special': False,
'text': ' de',
}),
dict({
'id': 40410,
'special': False,
'text': " l'eau",
}),
dict({
'id': 20226,
'special': False,
'text': ' bou',
}),
dict({
'id': 172483,
'special': False,
'text': 'illante',
}),
dict({
'id': 2805,
'special': False,
'text': ' sal',
}),
]),
}),
'generated_text': " le faire cuire dans de l'eau bouillante sal",
}),
dict({
'details': dict({
'best_of_sequences': None,
'finish_reason': <FinishReason.Length: 'length'>,
'generated_tokens': 10,
'prefill': list([
dict({
'id': 17934,
'text': 'Pour',
}),
dict({
'id': 49833,
'text': ' dég',
}),
dict({
'id': 21543,
'text': 'uster',
}),
dict({
'id': 447,
'text': ' un',
}),
dict({
'id': 46341,
'text': ' ort',
}),
dict({
'id': 35567,
'text': 'olan',
}),
dict({
'id': 15,
'text': ',',
}),
dict({
'id': 1669,
'text': ' il',
}),
dict({
'id': 11580,
'text': ' faut',
}),
dict({
'id': 3913,
'text': ' tout',
}),
dict({
'id': 39261,
'text': " d'abord",
}),
]),
'seed': None,
'tokens': list([
dict({
'id': 578,
'special': False,
'text': ' le',
}),
dict({
'id': 5608,
'special': False,
'text': ' faire',
}),
dict({
'id': 1767,
'special': False,
'text': ' cu',
}),
dict({
'id': 1273,
'special': False,
'text': 'ire',
}),
dict({
'id': 1486,
'special': False,
'text': ' dans',
}),
dict({
'id': 283,
'special': False,
'text': ' de',
}),
dict({
'id': 40410,
'special': False,
'text': " l'eau",
}),
dict({
'id': 20226,
'special': False,
'text': ' bou',
}),
dict({
'id': 172483,
'special': False,
'text': 'illante',
}),
dict({
'id': 2805,
'special': False,
'text': ' sal',
}),
]),
}),
'generated_text': " le faire cuire dans de l'eau bouillante sal",
}),
])
# ---
integration-tests/models/__snapshots__/test_bloom_560m/test_bloom_560m.json  (new file, mode 0 → 100644)

{
  "details": {
    "best_of_sequences": null,
    "finish_reason": "length",
    "generated_tokens": 10,
    "prefill": [
      { "id": 17934, "logprob": null, "text": "Pour" },
      { "id": 49833, "logprob": -10.5625, "text": " dég" },
      { "id": 21543, "logprob": -0.14770508, "text": "uster" },
      { "id": 447, "logprob": -1.9287109, "text": " un" },
      { "id": 46341, "logprob": -15.4609375, "text": " ort" },
      { "id": 35567, "logprob": -7.5585938, "text": "olan" },
      { "id": 15, "logprob": -1.4003906, "text": "," },
      { "id": 1669, "logprob": -1.5673828, "text": " il" },
      { "id": 11580, "logprob": -0.94628906, "text": " faut" },
      { "id": 3913, "logprob": -3.703125, "text": " tout" },
      { "id": 39261, "logprob": -1.5732422, "text": " d'abord" }
    ],
    "seed": 0,
    "tokens": [
      { "id": 578, "logprob": -1.6591797, "special": false, "text": " le" },
      { "id": 5608, "logprob": -2.4492188, "special": false, "text": " faire" },
      { "id": 159570, "logprob": -6.6835938, "special": false, "text": " réch" },
      { "id": 810, "logprob": 0.0, "special": false, "text": "au" },
      { "id": 12736, "logprob": 0.0, "special": false, "text": "ffer" },
      { "id": 1742, "logprob": -2.5175781, "special": false, "text": " au" },
      { "id": 6105, "logprob": -2.0078125, "special": false, "text": " bain" },
      { "id": 88254, "logprob": -0.12695312, "special": false, "text": "-mar" },
      { "id": 641, "logprob": 0.0, "special": false, "text": "ie" },
      { "id": 2940, "logprob": -3.5175781, "special": false, "text": " avec" }
    ]
  },
  "generated_text": " le faire réchauffer au bain-marie avec"
}
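The logprob values stored above are not required to match bit for bit when the tests run: ResponseComparator rebuilds Response objects from both sides and accepts a token whenever math.isclose(..., rel_tol=0.2) holds, so minor numerical drift between runs or hardware does not break the snapshot. A quick illustration; the "observed" values are invented for the example:

import math

stored = -1.6591797      # snapshot logprob for the " le" token above
observed_close = -1.72   # plausible drift from another run
observed_far = -2.5      # a genuinely different distribution

# Accepted: within 20% relative tolerance of the larger magnitude
assert math.isclose(stored, observed_close, rel_tol=0.2)

# Rejected: outside the tolerance band, so the snapshot would not match
assert not math.isclose(stored, observed_far, rel_tol=0.2)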
integration-tests/models/__snapshots__/test_bloom_560m/test_bloom_560m_all_params.json  (new file, mode 0 → 100644)

{
  "details": {
    "best_of_sequences": null,
    "finish_reason": "length",
    "generated_tokens": 10,
    "prefill": [
      { "id": 15, "logprob": null, "text": "," },
      { "id": 1669, "logprob": -5.4414062, "text": " il" },
      { "id": 11580, "logprob": -2.3378906, "text": " faut" },
      { "id": 3913, "logprob": -4.3554688, "text": " tout" },
      { "id": 39261, "logprob": -2.9238281, "text": " d'abord" }
    ],
    "seed": 0,
    "tokens": [
      { "id": 408, "logprob": -1.9267578, "special": false, "text": " que" },
      { "id": 20288, "logprob": -2.9257812, "special": false, "text": " l'on" },
      { "id": 22255, "logprob": -2.8964844, "special": false, "text": " trouve" },
      { "id": 1622, "logprob": -1.1083984, "special": false, "text": " une" },
      { "id": 187079, "logprob": -7.796875, "special": false, "text": " posture" },
      { "id": 501, "logprob": -5.390625, "special": false, "text": " par" },
      { "id": 8741, "logprob": -0.34936523, "special": false, "text": " rapport" },
      { "id": 693, "logprob": 0.0, "special": false, "text": " à" },
      { "id": 366, "logprob": -2.3378906, "special": false, "text": " la" },
      { "id": 36503, "logprob": -3.6640625, "special": false, "text": " pratique" }
    ]
  },
  "generated_text": "Pour déguster un ortolan, il faut tout d'abord que l'on trouve une posture par rapport à la pratique"
}
integration-tests/models/__snapshots__/test_bloom_560m/test_bloom_560m_load.json  (new file, mode 0 → 100644)

[
  {
    "details": {
      "best_of_sequences": null,
      "finish_reason": "length",
      "generated_tokens": 10,
      "prefill": [
        { "id": 17934, "logprob": null, "text": "Pour" },
        { "id": 49833, "logprob": -10.5625, "text": " dég" },
        { "id": 21543, "logprob": -0.14770508, "text": "uster" },
        { "id": 447, "logprob": -1.9287109, "text": " un" },
        { "id": 46341, "logprob": -15.4609375, "text": " ort" },
        { "id": 35567, "logprob": -7.5585938, "text": "olan" },
        { "id": 15, "logprob": -1.4003906, "text": "," },
        { "id": 1669, "logprob": -1.5673828, "text": " il" },
        { "id": 11580, "logprob": -0.94628906, "text": " faut" },
        { "id": 3913, "logprob": -3.703125, "text": " tout" },
        { "id": 39261, "logprob": -1.5732422, "text": " d'abord" }
      ],
      "seed": null,
      "tokens": [
        { "id": 578, "logprob": -1.7646484, "special": false, "text": " le" },
        { "id": 5608, "logprob": -2.6113281, "special": false, "text": " faire" },
        { "id": 1767, "logprob": -1.5263672, "special": false, "text": " cu" },
        { "id": 1273, "logprob": -0.00010049343, "special": false, "text": "ire" },
        { "id": 1486, "logprob": -1.4707031, "special": false, "text": " dans" },
        { "id": 283, "logprob": -1.2119141, "special": false, "text": " de" },
        { "id": 40410, "logprob": -0.11883545, "special": false, "text": " l'eau" },
        { "id": 20226, "logprob": -0.40844727, "special": false, "text": " bou" },
        { "id": 172483, "logprob": -0.0037841797, "special": false, "text": "illante" },
        { "id": 2805, "logprob": -1.0195312, "special": false, "text": " sal" }
      ]
    },
    "generated_text": " le faire cuire dans de l'eau bouillante sal"
  },
  {
    "details": {
      "best_of_sequences": null,
      "finish_reason": "length",
      "generated_tokens": 10,
      "prefill": [
        { "id": 17934, "logprob": null, "text": "Pour" },
        { "id": 49833, "logprob": -10.53125, "text": " dég" },
        { "id": 21543, "logprob": -0.14770508, "text": "uster" },
        { "id": 447, "logprob": -1.9287109, "text": " un" },
        { "id": 46341, "logprob": -15.4140625, "text": " ort" },
        { "id": 35567, "logprob": -7.5234375, "text": "olan" },
        { "id": 15, "logprob": -1.3613281, "text": "," },
        { "id": 1669, "logprob": -1.5458984, "text": " il" },
        { "id": 11580, "logprob": -0.94189453, "text": " faut" },
        { "id": 3913, "logprob": -3.7011719, "text": " tout" },
        { "id": 39261, "logprob": -1.5732422, "text": " d'abord" }
      ],
      "seed": null,
      "tokens": [
        { "id": 578, "logprob": -1.7548828, "special": false, "text": " le" },
        { "id": 5608, "logprob": -2.578125, "special": false, "text": " faire" },
        { "id": 1767, "logprob": -1.5117188, "special": false, "text": " cu" },
        { "id": 1273, "logprob": -0.00010049343, "special": false, "text": "ire" },
        { "id": 1486, "logprob": -1.4707031, "special": false, "text": " dans" },
        { "id": 283, "logprob": -1.1982422, "special": false, "text": " de" },
        { "id": 40410, "logprob": -0.11004639, "special": false, "text": " l'eau" },
        { "id": 20226, "logprob": -0.4506836, "special": false, "text": " bou" },
        { "id": 172483, "logprob": -0.003047943, "special": false, "text": "illante" },
        { "id": 2805, "logprob": -1.0185547, "special": false, "text": " sal" }
      ]
    },
    "generated_text": " le faire cuire dans de l'eau bouillante sal"
  },
  {
    "details": {
      "best_of_sequences": null,
      "finish_reason": "length",
      "generated_tokens": 10,
      "prefill": [
        { "id": 17934, "logprob": null, "text": "Pour" },
        { "id": 49833, "logprob": -10.53125, "text": " dég" },
        { "id": 21543, "logprob": -0.14770508, "text": "uster" },
        { "id": 447, "logprob": -1.9287109, "text": " un" },
        { "id": 46341, "logprob": -15.4140625, "text": " ort" },
        { "id": 35567, "logprob": -7.5234375, "text": "olan" },
        { "id": 15, "logprob": -1.3613281, "text": "," },
        { "id": 1669, "logprob": -1.5458984, "text": " il" },
        { "id": 11580, "logprob": -0.94189453, "text": " faut" },
        { "id": 3913, "logprob": -3.7011719, "text": " tout" },
        { "id": 39261, "logprob": -1.5732422, "text": " d'abord" }
      ],
      "seed": null,
      "tokens": [
        { "id": 578, "logprob": -1.7548828, "special": false, "text": " le" },
        { "id": 5608, "logprob": -2.578125, "special": false, "text": " faire" },
        { "id": 1767, "logprob": -1.5117188, "special": false, "text": " cu" },
        { "id": 1273, "logprob": -0.00010049343, "special": false, "text": "ire" },
        { "id": 1486, "logprob": -1.4707031, "special": false, "text": " dans" },
        { "id": 283, "logprob": -1.1982422, "special": false, "text": " de" },
        { "id": 40410, "logprob": -0.11004639, "special": false, "text": " l'eau" },
        { "id": 20226, "logprob": -0.4506836, "special": false, "text": " bou" },
        { "id": 172483, "logprob": -0.003047943, "special": false, "text": "illante" },
        { "id": 2805, "logprob": -1.0185547, "special": false, "text": " sal" }
      ]
    },
    "generated_text": " le faire cuire dans de l'eau bouillante sal"
  },
  {
    "details": {
      "best_of_sequences": null,
      "finish_reason": "length",
      "generated_tokens": 10,
      "prefill": [
        { "id": 17934, "logprob": null, "text": "Pour" },
        { "id": 49833, "logprob": -10.53125, "text": " dég" },
        { "id": 21543, "logprob": -0.14770508, "text": "uster" },
        { "id": 447, "logprob": -1.9287109, "text": " un" },
        { "id": 46341, "logprob": -15.4140625, "text": " ort" },
        { "id": 35567, "logprob": -7.5234375, "text": "olan" },
        { "id": 15, "logprob": -1.3613281, "text": "," },
        { "id": 1669, "logprob": -1.5458984, "text": " il" },
        { "id": 11580, "logprob": -0.94189453, "text": " faut" },
        { "id": 3913, "logprob": -3.7011719, "text": " tout" },
        { "id": 39261, "logprob": -1.5732422, "text": " d'abord" }
      ],
      "seed": null,
      "tokens": [
        { "id": 578, "logprob": -1.7548828, "special": false, "text": " le" },
        { "id": 5608, "logprob": -2.578125, "special": false, "text": " faire" },
        { "id": 1767, "logprob": -1.5117188, "special": false, "text": " cu" },
        { "id": 1273, "logprob": -0.00010049343, "special": false, "text": "ire" },
        { "id": 1486, "logprob": -1.4707031, "special": false, "text": " dans" },
        { "id": 283, "logprob": -1.1982422, "special": false, "text": " de" },
        { "id": 40410, "logprob": -0.11004639, "special": false, "text": " l'eau" },
        { "id": 20226, "logprob": -0.4506836, "special": false, "text": " bou" },
        { "id": 172483, "logprob": -0.003047943, "special": false, "text": "illante" },
        { "id": 2805, "logprob": -1.0185547, "special": false, "text": " sal" }
      ]
    },
    "generated_text": " le faire cuire dans de l'eau bouillante sal"
  }
]
integration-tests/models/__snapshots__/test_bloom_560m_sharded.ambr  (deleted, mode 100644 → 0)

# serializer version: 1
# name: test_bloom_560m_sharded
dict({
'details': dict({
'best_of_sequences': None,
'finish_reason': <FinishReason.Length: 'length'>,
'generated_tokens': 10,
'prefill': list([
dict({
'id': 17934,
'text': 'Pour',
}),
dict({
'id': 49833,
'text': ' dég',
}),
dict({
'id': 21543,
'text': 'uster',
}),
dict({
'id': 447,
'text': ' un',
}),
dict({
'id': 46341,
'text': ' ort',
}),
dict({
'id': 35567,
'text': 'olan',
}),
dict({
'id': 15,
'text': ',',
}),
dict({
'id': 1669,
'text': ' il',
}),
dict({
'id': 11580,
'text': ' faut',
}),
dict({
'id': 3913,
'text': ' tout',
}),
dict({
'id': 39261,
'text': " d'abord",
}),
]),
'seed': 0,
'tokens': list([
dict({
'id': 578,
'special': False,
'text': ' le',
}),
dict({
'id': 5608,
'special': False,
'text': ' faire',
}),
dict({
'id': 159570,
'special': False,
'text': ' réch',
}),
dict({
'id': 810,
'special': False,
'text': 'au',
}),
dict({
'id': 12736,
'special': False,
'text': 'ffer',
}),
dict({
'id': 1742,
'special': False,
'text': ' au',
}),
dict({
'id': 6105,
'special': False,
'text': ' bain',
}),
dict({
'id': 88254,
'special': False,
'text': '-mar',
}),
dict({
'id': 641,
'special': False,
'text': 'ie',
}),
dict({
'id': 2940,
'special': False,
'text': ' avec',
}),
]),
}),
'generated_text': ' le faire réchauffer au bain-marie avec',
})
# ---
# name: test_bloom_560m_sharded_load
list([
dict({
'details': dict({
'best_of_sequences': None,
'finish_reason': <FinishReason.Length: 'length'>,
'generated_tokens': 10,
'prefill': list([
dict({
'id': 17934,
'text': 'Pour',
}),
dict({
'id': 49833,
'text': ' dég',
}),
dict({
'id': 21543,
'text': 'uster',
}),
dict({
'id': 447,
'text': ' un',
}),
dict({
'id': 46341,
'text': ' ort',
}),
dict({
'id': 35567,
'text': 'olan',
}),
dict({
'id': 15,
'text': ',',
}),
dict({
'id': 1669,
'text': ' il',
}),
dict({
'id': 11580,
'text': ' faut',
}),
dict({
'id': 3913,
'text': ' tout',
}),
dict({
'id': 39261,
'text': " d'abord",
}),
]),
'seed': None,
'tokens': list([
dict({
'id': 578,
'special': False,
'text': ' le',
}),
dict({
'id': 5608,
'special': False,
'text': ' faire',
}),
dict({
'id': 1767,
'special': False,
'text': ' cu',
}),
dict({
'id': 1273,
'special': False,
'text': 'ire',
}),
dict({
'id': 1486,
'special': False,
'text': ' dans',
}),
dict({
'id': 283,
'special': False,
'text': ' de',
}),
dict({
'id': 40410,
'special': False,
'text': " l'eau",
}),
dict({
'id': 20226,
'special': False,
'text': ' bou',
}),
dict({
'id': 172483,
'special': False,
'text': 'illante',
}),
dict({
'id': 2805,
'special': False,
'text': ' sal',
}),
]),
}),
'generated_text': " le faire cuire dans de l'eau bouillante sal",
}),
dict({
'details': dict({
'best_of_sequences': None,
'finish_reason': <FinishReason.Length: 'length'>,
'generated_tokens': 10,
'prefill': list([
dict({
'id': 17934,
'text': 'Pour',
}),
dict({
'id': 49833,
'text': ' dég',
}),
dict({
'id': 21543,
'text': 'uster',
}),
dict({
'id': 447,
'text': ' un',
}),
dict({
'id': 46341,
'text': ' ort',
}),
dict({
'id': 35567,
'text': 'olan',
}),
dict({
'id': 15,
'text': ',',
}),
dict({
'id': 1669,
'text': ' il',
}),
dict({
'id': 11580,
'text': ' faut',
}),
dict({
'id': 3913,
'text': ' tout',
}),
dict({
'id': 39261,
'text': " d'abord",
}),
]),
'seed': None,
'tokens': list([
dict({
'id': 578,
'special': False,
'text': ' le',
}),
dict({
'id': 5608,
'special': False,
'text': ' faire',
}),
dict({
'id': 1767,
'special': False,
'text': ' cu',
}),
dict({
'id': 1273,
'special': False,
'text': 'ire',
}),
dict({
'id': 1486,
'special': False,
'text': ' dans',
}),
dict({
'id': 283,
'special': False,
'text': ' de',
}),
dict({
'id': 40410,
'special': False,
'text': " l'eau",
}),
dict({
'id': 20226,
'special': False,
'text': ' bou',
}),
dict({
'id': 172483,
'special': False,
'text': 'illante',
}),
dict({
'id': 2805,
'special': False,
'text': ' sal',
}),
]),
}),
'generated_text': " le faire cuire dans de l'eau bouillante sal",
}),
dict({
'details': dict({
'best_of_sequences': None,
'finish_reason': <FinishReason.Length: 'length'>,
'generated_tokens': 10,
'prefill': list([
dict({
'id': 17934,
'text': 'Pour',
}),
dict({
'id': 49833,
'text': ' dég',
}),
dict({
'id': 21543,
'text': 'uster',
}),
dict({
'id': 447,
'text': ' un',
}),
dict({
'id': 46341,
'text': ' ort',
}),
dict({
'id': 35567,
'text': 'olan',
}),
dict({
'id': 15,
'text': ',',
}),
dict({
'id': 1669,
'text': ' il',
}),
dict({
'id': 11580,
'text': ' faut',
}),
dict({
'id': 3913,
'text': ' tout',
}),
dict({
'id': 39261,
'text': " d'abord",
}),
]),
'seed': None,
'tokens': list([
dict({
'id': 578,
'special': False,
'text': ' le',
}),
dict({
'id': 5608,
'special': False,
'text': ' faire',
}),
dict({
'id': 1767,
'special': False,
'text': ' cu',
}),
dict({
'id': 1273,
'special': False,
'text': 'ire',
}),
dict({
'id': 1486,
'special': False,
'text': ' dans',
}),
dict({
'id': 283,
'special': False,
'text': ' de',
}),
dict({
'id': 40410,
'special': False,
'text': " l'eau",
}),
dict({
'id': 20226,
'special': False,
'text': ' bou',
}),
dict({
'id': 172483,
'special': False,
'text': 'illante',
}),
dict({
'id': 2805,
'special': False,
'text': ' sal',
}),
]),
}),
'generated_text': " le faire cuire dans de l'eau bouillante sal",
}),
dict({
'details': dict({
'best_of_sequences': None,
'finish_reason': <FinishReason.Length: 'length'>,
'generated_tokens': 10,
'prefill': list([
dict({
'id': 17934,
'text': 'Pour',
}),
dict({
'id': 49833,
'text': ' dég',
}),
dict({
'id': 21543,
'text': 'uster',
}),
dict({
'id': 447,
'text': ' un',
}),
dict({
'id': 46341,
'text': ' ort',
}),
dict({
'id': 35567,
'text': 'olan',
}),
dict({
'id': 15,
'text': ',',
}),
dict({
'id': 1669,
'text': ' il',
}),
dict({
'id': 11580,
'text': ' faut',
}),
dict({
'id': 3913,
'text': ' tout',
}),
dict({
'id': 39261,
'text': " d'abord",
}),
]),
'seed': None,
'tokens': list([
dict({
'id': 578,
'special': False,
'text': ' le',
}),
dict({
'id': 5608,
'special': False,
'text': ' faire',
}),
dict({
'id': 1767,
'special': False,
'text': ' cu',
}),
dict({
'id': 1273,
'special': False,
'text': 'ire',
}),
dict({
'id': 1486,
'special': False,
'text': ' dans',
}),
dict({
'id': 283,
'special': False,
'text': ' de',
}),
dict({
'id': 40410,
'special': False,
'text': " l'eau",
}),
dict({
'id': 20226,
'special': False,
'text': ' bou',
}),
dict({
'id': 172483,
'special': False,
'text': 'illante',
}),
dict({
'id': 2805,
'special': False,
'text': ' sal',
}),
]),
}),
'generated_text': " le faire cuire dans de l'eau bouillante sal",
}),
])
# ---
integration-tests/models/__snapshots__/test_bloom_560m_sharded/test_bloom_560m_sharded.json  (new file, mode 0 → 100644)

{
  "details": {
    "best_of_sequences": null,
    "finish_reason": "length",
    "generated_tokens": 10,
    "prefill": [
      { "id": 17934, "logprob": null, "text": "Pour" },
      { "id": 49833, "logprob": -10.5390625, "text": " dég" },
      { "id": 21543, "logprob": -0.14758301, "text": "uster" },
      { "id": 447, "logprob": -1.9296875, "text": " un" },
      { "id": 46341, "logprob": -15.4453125, "text": " ort" },
      { "id": 35567, "logprob": -7.59375, "text": "olan" },
      { "id": 15, "logprob": -1.3994141, "text": "," },
      { "id": 1669, "logprob": -1.578125, "text": " il" },
      { "id": 11580, "logprob": -0.9453125, "text": " faut" },
      { "id": 3913, "logprob": -3.7011719, "text": " tout" },
      { "id": 39261, "logprob": -1.5732422, "text": " d'abord" }
    ],
    "seed": 0,
    "tokens": [
      { "id": 578, "logprob": -1.6474609, "special": false, "text": " le" },
      { "id": 5608, "logprob": -2.5097656, "special": false, "text": " faire" },
      { "id": 159570, "logprob": -6.65625, "special": false, "text": " réch" },
      { "id": 810, "logprob": 0.0, "special": false, "text": "au" },
      { "id": 12736, "logprob": 0.0, "special": false, "text": "ffer" },
      { "id": 1742, "logprob": -2.5859375, "special": false, "text": " au" },
      { "id": 6105, "logprob": -2.03125, "special": false, "text": " bain" },
      { "id": 88254, "logprob": -0.12695312, "special": false, "text": "-mar" },
      { "id": 641, "logprob": 0.0, "special": false, "text": "ie" },
      { "id": 2940, "logprob": -3.5175781, "special": false, "text": " avec" }
    ]
  },
  "generated_text": " le faire réchauffer au bain-marie avec"
}
integration-tests/models/__snapshots__/test_bloom_560m_sharded/test_bloom_560m_sharded_load.json  (new file, mode 0 → 100644)

[
  {
    "details": {
      "best_of_sequences": null,
      "finish_reason": "length",
      "generated_tokens": 10,
      "prefill": [
        { "id": 17934, "logprob": null, "text": "Pour" },
        { "id": 49833, "logprob": -10.5390625, "text": " dég" },
        { "id": 21543, "logprob": -0.14758301, "text": "uster" },
        { "id": 447, "logprob": -1.9296875, "text": " un" },
        { "id": 46341, "logprob": -15.4453125, "text": " ort" },
        { "id": 35567, "logprob": -7.59375, "text": "olan" },
        { "id": 15, "logprob": -1.3994141, "text": "," },
        { "id": 1669, "logprob": -1.578125, "text": " il" },
        { "id": 11580, "logprob": -0.9453125, "text": " faut" },
        { "id": 3913, "logprob": -3.7011719, "text": " tout" },
        { "id": 39261, "logprob": -1.5732422, "text": " d'abord" }
      ],
      "seed": null,
      "tokens": [
        { "id": 578, "logprob": -1.7529297, "special": false, "text": " le" },
        { "id": 5608, "logprob": -2.6054688, "special": false, "text": " faire" },
        { "id": 1767, "logprob": -1.5283203, "special": false, "text": " cu" },
        { "id": 1273, "logprob": -0.00010049343, "special": false, "text": "ire" },
        { "id": 1486, "logprob": -1.4716797, "special": false, "text": " dans" },
        { "id": 283, "logprob": -1.1982422, "special": false, "text": " de" },
        { "id": 40410, "logprob": -0.11853027, "special": false, "text": " l'eau" },
        { "id": 20226, "logprob": -0.41210938, "special": false, "text": " bou" },
        { "id": 172483, "logprob": -0.0037765503, "special": false, "text": "illante" },
        { "id": 2805, "logprob": -1.0166016, "special": false, "text": " sal" }
      ]
    },
    "generated_text": " le faire cuire dans de l'eau bouillante sal"
  },
  {
    "details": {
      "best_of_sequences": null,
      "finish_reason": "length",
      "generated_tokens": 10,
      "prefill": [
        { "id": 17934, "logprob": null, "text": "Pour" },
        { "id": 49833, "logprob": -10.515625, "text": " dég" },
        { "id": 21543, "logprob": -0.1484375, "text": "uster" },
        { "id": 447, "logprob": -1.9287109, "text": " un" },
        { "id": 46341, "logprob": -15.34375, "text": " ort" },
        { "id": 35567, "logprob": -7.515625, "text": "olan" },
        { "id": 15, "logprob": -1.4199219, "text": "," },
        { "id": 1669, "logprob": -1.5664062, "text": " il" },
        { "id": 11580, "logprob": -0.94091797, "text": " faut" },
        { "id": 3913, "logprob": -3.6660156, "text": " tout" },
        { "id": 39261, "logprob": -1.7753906, "text": " d'abord" }
      ],
      "seed": null,
      "tokens": [
        { "id": 578, "logprob": -1.7626953, "special": false, "text": " le" },
        { "id": 5608, "logprob": -2.5820312, "special": false, "text": " faire" },
        { "id": 1767, "logprob": -1.5097656, "special": false, "text": " cu" },
        { "id": 1273, "logprob": -9.393692e-05, "special": false, "text": "ire" },
        { "id": 1486, "logprob": -1.5175781, "special": false, "text": " dans" },
        { "id": 283, "logprob": -1.1982422, "special": false, "text": " de" },
        { "id": 40410, "logprob": -0.11883545, "special": false, "text": " l'eau" },
        { "id": 20226, "logprob": -0.4909668, "special": false, "text": " bou" },
        { "id": 172483, "logprob": -0.003047943, "special": false, "text": "illante" },
        { "id": 2805, "logprob": -1.0185547, "special": false, "text": " sal" }
      ]
    },
    "generated_text": " le faire cuire dans de l'eau bouillante sal"
  },
  {
    "details": {
      "best_of_sequences": null,
      "finish_reason": "length",
      "generated_tokens": 10,
      "prefill": [
        { "id": 17934, "logprob": null, "text": "Pour" },
        { "id": 49833, "logprob": -10.515625, "text": " dég" },
        { "id": 21543, "logprob": -0.1484375, "text": "uster" },
        { "id": 447, "logprob": -1.9287109, "text": " un" },
        { "id": 46341, "logprob": -15.34375, "text": " ort" },
        { "id": 35567, "logprob": -7.515625, "text": "olan" },
        { "id": 15, "logprob": -1.4199219, "text": "," },
        { "id": 1669, "logprob": -1.5664062, "text": " il" },
        { "id": 11580, "logprob": -0.94091797, "text": " faut" },
        { "id": 3913, "logprob": -3.6660156, "text": " tout" },
        { "id": 39261, "logprob": -1.7753906, "text": " d'abord" }
      ],
      "seed": null,
      "tokens": [
        { "id": 578, "logprob": -1.7626953, "special": false, "text": " le" },
        { "id": 5608, "logprob": -2.5820312, "special": false, "text": " faire" },
        { "id": 1767, "logprob": -1.5097656, "special": false, "text": " cu" },
        { "id": 1273, "logprob": -9.393692e-05, "special": false, "text": "ire" },
        { "id": 1486, "logprob": -1.5175781, "special": false, "text": " dans" },
        { "id": 283, "logprob": -1.1982422, "special": false, "text": " de" },
        { "id": 40410, "logprob": -0.11883545, "special": false, "text": " l'eau" },
        { "id": 20226, "logprob": -0.4909668, "special": false, "text": " bou" },
        { "id": 172483, "logprob": -0.003047943, "special": false, "text": "illante" },
        { "id": 2805, "logprob": -1.0185547, "special": false, "text": " sal" }
      ]
    },
    "generated_text": " le faire cuire dans de l'eau bouillante sal"
  },
  {
    "details": {
      "best_of_sequences": null,
      "finish_reason": "length",
      "generated_tokens": 10,
      "prefill": [
        { "id": 17934, "logprob": null, "text": "Pour" },
        { "id": 49833, "logprob": -10.515625, "text": " dég" },
        { "id": 21543, "logprob": -0.1484375, "text": "uster" },
        { "id": 447, "logprob": -1.9287109, "text": " un" },
        { "id": 46341, "logprob": -15.34375, "text": " ort" },
        { "id": 35567, "logprob": -7.515625, "text": "olan" },
        { "id": 15, "logprob": -1.4199219, "text": "," },
        { "id": 1669, "logprob": -1.5664062, "text": " il" },
        { "id": 11580, "logprob": -0.94091797, "text": " faut" },
        { "id": 3913, "logprob": -3.6660156, "text": " tout" },
        { "id": 39261, "logprob": -1.7753906, "text": " d'abord" }
      ],
      "seed": null,
      "tokens": [
        { "id": 578, "logprob": -1.7626953, "special": false, "text": " le" },
        { "id": 5608, "logprob": -2.5820312, "special": false, "text": " faire" },
        { "id": 1767, "logprob": -1.5097656, "special": false, "text": " cu" },
        { "id": 1273, "logprob": -9.393692e-05, "special": false, "text": "ire" },
        { "id": 1486, "logprob": -1.5175781, "special": false, "text": " dans" },
        { "id": 283, "logprob": -1.1982422, "special": false, "text": " de" },
        { "id": 40410, "logprob": -0.11883545, "special": false, "text": " l'eau" },
        { "id": 20226, "logprob": -0.4909668, "special": false, "text": " bou" },
        { "id": 172483, "logprob": -0.003047943, "special": false, "text": "illante" },
        { "id": 2805, "logprob": -1.0185547, "special": false, "text": " sal" }
      ]
    },
    "generated_text": " le faire cuire dans de l'eau bouillante sal"
  }
]
integration-tests/models/__snapshots__/test_flash_llama.ambr  (deleted, mode 100644 → 0)

# serializer version: 1
# name: test_flash_llama
dict({
'details': dict({
'best_of_sequences': None,
'finish_reason': <FinishReason.Length: 'length'>,
'generated_tokens': 10,
'prefill': list([
dict({
'id': 1,
'text': '<s>',
}),
dict({
'id': 4321,
'text': 'Test',
}),
dict({
'id': 2009,
'text': 'request',
}),
]),
'seed': None,
'tokens': list([
dict({
'id': 363,
'special': False,
'text': ' for',
}),
dict({
'id': 847,
'special': False,
'text': ' /',
}),
dict({
'id': 2754,
'special': False,
'text': 'api',
}),
dict({
'id': 29914,
'special': False,
'text': '/',
}),
dict({
'id': 29894,
'special': False,
'text': 'v',
}),
dict({
'id': 29896,
'special': False,
'text': '1',
}),
dict({
'id': 29914,
'special': False,
'text': '/',
}),
dict({
'id': 16418,
'special': False,
'text': 'projects',
}),
dict({
'id': 29914,
'special': False,
'text': '/',
}),
dict({
'id': 29896,
'special': False,
'text': '1',
}),
]),
}),
'generated_text': 'for /api/v1/projects/1',
})
# ---
# name: test_flash_llama_all_params
dict({
'details': dict({
'best_of_sequences': None,
'finish_reason': <FinishReason.Length: 'length'>,
'generated_tokens': 10,
'prefill': list([
dict({
'id': 1,
'text': '<s>',
}),
dict({
'id': 4321,
'text': 'Test',
}),
dict({
'id': 2009,
'text': 'request',
}),
]),
'seed': 0,
'tokens': list([
dict({
'id': 5229,
'special': False,
'text': ' failed',
}),
dict({
'id': 363,
'special': False,
'text': ' for',
}),
dict({
'id': 5641,
'special': False,
'text': ' IP',
}),
dict({
'id': 16428,
'special': False,
'text': ' Address',
}),
dict({
'id': 29901,
'special': False,
'text': ':',
}),
dict({
'id': 525,
'special': False,
'text': " '",
}),
dict({
'id': 8516,
'special': False,
'text': 'None',
}),
dict({
'id': 4286,
'special': False,
'text': "'.",
}),
dict({
'id': 13,
'special': False,
'text': '''
''',
}),
dict({
'id': 294,
'special': False,
'text': 'as',
}),
]),
}),
'generated_text': '''
Test requestfailed for IP Address: 'None'.
as
''',
})
# ---
# name: test_flash_llama_load
list([
dict({
'details': dict({
'best_of_sequences': None,
'finish_reason': <FinishReason.Length: 'length'>,
'generated_tokens': 10,
'prefill': list([
dict({
'id': 1,
'text': '<s>',
}),
dict({
'id': 4321,
'text': 'Test',
}),
dict({
'id': 2009,
'text': 'request',
}),
]),
'seed': None,
'tokens': list([
dict({
'id': 363,
'special': False,
'text': ' for',
}),
dict({
'id': 847,
'special': False,
'text': ' /',
}),
dict({
'id': 2754,
'special': False,
'text': 'api',
}),
dict({
'id': 29914,
'special': False,
'text': '/',
}),
dict({
'id': 29894,
'special': False,
'text': 'v',
}),
dict({
'id': 29896,
'special': False,
'text': '1',
}),
dict({
'id': 29914,
'special': False,
'text': '/',
}),
dict({
'id': 16418,
'special': False,
'text': 'projects',
}),
dict({
'id': 29914,
'special': False,
'text': '/',
}),
dict({
'id': 29896,
'special': False,
'text': '1',
}),
]),
}),
'generated_text': 'for /api/v1/projects/1',
}),
dict({
'details': dict({
'best_of_sequences': None,
'finish_reason': <FinishReason.Length: 'length'>,
'generated_tokens': 10,
'prefill': list([
dict({
'id': 1,
'text': '<s>',
}),
dict({
'id': 4321,
'text': 'Test',
}),
dict({
'id': 2009,
'text': 'request',
}),
]),
'seed': None,
'tokens': list([
dict({
'id': 363,
'special': False,
'text': ' for',
}),
dict({
'id': 847,
'special': False,
'text': ' /',
}),
dict({
'id': 2754,
'special': False,
'text': 'api',
}),
dict({
'id': 29914,
'special': False,
'text': '/',
}),
dict({
'id': 29894,
'special': False,
'text': 'v',
}),
dict({
'id': 29896,
'special': False,
'text': '1',
}),
dict({
'id': 29914,
'special': False,
'text': '/',
}),
dict({
'id': 16418,
'special': False,
'text': 'projects',
}),
dict({
'id': 29914,
'special': False,
'text': '/',
}),
dict({
'id': 29896,
'special': False,
'text': '1',
}),
]),
}),
'generated_text': 'for /api/v1/projects/1',
}),
dict({
'details': dict({
'best_of_sequences': None,
'finish_reason': <FinishReason.Length: 'length'>,
'generated_tokens': 10,
'prefill': list([
dict({
'id': 1,
'text': '<s>',
}),
dict({
'id': 4321,
'text': 'Test',
}),
dict({
'id': 2009,
'text': 'request',
}),
]),
'seed': None,
'tokens': list([
dict({
'id': 363,
'special': False,
'text': ' for',
}),
dict({
'id': 847,
'special': False,
'text': ' /',
}),
dict({
'id': 2754,
'special': False,
'text': 'api',
}),
dict({
'id': 29914,
'special': False,
'text': '/',
}),
dict({
'id': 29894,
'special': False,
'text': 'v',
}),
dict({
'id': 29896,
'special': False,
'text': '1',
}),
dict({
'id': 29914,
'special': False,
'text': '/',
}),
dict({
'id': 16418,
'special': False,
'text': 'projects',
}),
dict({
'id': 29914,
'special': False,
'text': '/',
}),
dict({
'id': 29896,
'special': False,
'text': '1',
}),
]),
}),
'generated_text': 'for /api/v1/projects/1',
}),
dict({
'details': dict({
'best_of_sequences': None,
'finish_reason': <FinishReason.Length: 'length'>,
'generated_tokens': 10,
'prefill': list([
dict({
'id': 1,
'text': '<s>',
}),
dict({
'id': 4321,
'text': 'Test',
}),
dict({
'id': 2009,
'text': 'request',
}),
]),
'seed': None,
'tokens': list([
dict({
'id': 363,
'special': False,
'text': ' for',
}),
dict({
'id': 847,
'special': False,
'text': ' /',
}),
dict({
'id': 2754,
'special': False,
'text': 'api',
}),
dict({
'id': 29914,
'special': False,
'text': '/',
}),
dict({
'id': 29894,
'special': False,
'text': 'v',
}),
dict({
'id': 29896,
'special': False,
'text': '1',
}),
dict({
'id': 29914,
'special': False,
'text': '/',
}),
dict({
'id': 16418,
'special': False,
'text': 'projects',
}),
dict({
'id': 29914,
'special': False,
'text': '/',
}),
dict({
'id': 29896,
'special': False,
'text': '1',
}),
]),
}),
'generated_text': 'for /api/v1/projects/1',
}),
])
# ---
integration-tests/models/__snapshots__/test_flash_llama/test_flash_llama.json  (new file, mode 0 → 100644)

{
  "details": {
    "best_of_sequences": null,
    "finish_reason": "length",
    "generated_tokens": 10,
    "prefill": [
      { "id": 1, "logprob": null, "text": "<s>" },
      { "id": 4321, "logprob": -8.6875, "text": "Test" },
      { "id": 2009, "logprob": -11.5546875, "text": "request" }
    ],
    "seed": null,
    "tokens": [
      { "id": 363, "logprob": -1.5380859, "special": false, "text": " for" },
      { "id": 847, "logprob": -2.5917969, "special": false, "text": " /" },
      { "id": 2754, "logprob": -2.2773438, "special": false, "text": "api" },
      { "id": 29914, "logprob": -0.034362793, "special": false, "text": "/" },
      { "id": 29894, "logprob": -0.96533203, "special": false, "text": "v" },
      { "id": 29896, "logprob": -0.36669922, "special": false, "text": "1" },
      { "id": 29914, "logprob": -0.013122559, "special": false, "text": "/" },
      { "id": 16418, "logprob": -3.1503906, "special": false, "text": "projects" },
      { "id": 29914, "logprob": -0.43652344, "special": false, "text": "/" },
      { "id": 29896, "logprob": -1.9404297, "special": false, "text": "1" }
    ]
  },
  "generated_text": "for /api/v1/projects/1"
}
integration-tests/models/__snapshots__/test_flash_llama/test_flash_llama_all_params.json  (new file, mode 0 → 100644)

{
  "details": {
    "best_of_sequences": null,
    "finish_reason": "length",
    "generated_tokens": 10,
    "prefill": [
      { "id": 1, "logprob": null, "text": "<s>" },
      { "id": 4321, "logprob": -8.6875, "text": "Test" },
      { "id": 2009, "logprob": -11.5546875, "text": "request" }
    ],
    "seed": 0,
    "tokens": [
      { "id": 5229, "logprob": -3.3085938, "special": false, "text": " failed" },
      { "id": 363, "logprob": -3.984375, "special": false, "text": " for" },
      { "id": 5641, "logprob": -6.53125, "special": false, "text": " IP" },
      { "id": 16428, "logprob": -3.1835938, "special": false, "text": " Address" },
      { "id": 29901, "logprob": -1.2324219, "special": false, "text": ":" },
      { "id": 525, "logprob": -2.6855469, "special": false, "text": " '" },
      { "id": 8516, "logprob": -7.1601562, "special": false, "text": "None" },
      { "id": 4286, "logprob": -2.4433594, "special": false, "text": "'." },
      { "id": 13, "logprob": -0.06530762, "special": false, "text": "\n" },
      { "id": 294, "logprob": -7.953125, "special": false, "text": "as" }
    ]
  },
  "generated_text": "Test requestfailed for IP Address: 'None'.\nas"
}
integration-tests/models/__snapshots__/test_flash_llama/test_flash_llama_load.json
0 → 100644
View file @
dbdc587d
[
  {
    "details": {
      "best_of_sequences": null,
      "finish_reason": "length",
      "generated_tokens": 10,
      "prefill": [
        { "id": 1, "logprob": null, "text": "<s>" },
        { "id": 4321, "logprob": -8.6875, "text": "Test" },
        { "id": 2009, "logprob": -11.5546875, "text": "request" }
      ],
      "seed": null,
      "tokens": [
        { "id": 363, "logprob": -1.5322266, "special": false, "text": " for" },
        { "id": 847, "logprob": -2.5585938, "special": false, "text": " /" },
        { "id": 2754, "logprob": -2.265625, "special": false, "text": "api" },
        { "id": 29914, "logprob": -0.034088135, "special": false, "text": "/" },
        { "id": 29894, "logprob": -0.96240234, "special": false, "text": "v" },
        { "id": 29896, "logprob": -0.36816406, "special": false, "text": "1" },
        { "id": 29914, "logprob": -0.013191223, "special": false, "text": "/" },
        { "id": 16418, "logprob": -3.15625, "special": false, "text": "projects" },
        { "id": 29914, "logprob": -0.43774414, "special": false, "text": "/" },
        { "id": 29896, "logprob": -1.9443359, "special": false, "text": "1" }
      ]
    },
    "generated_text": "for /api/v1/projects/1"
  },
  {
    "details": {
      "best_of_sequences": null,
      "finish_reason": "length",
      "generated_tokens": 10,
      "prefill": [
        { "id": 1, "logprob": null, "text": "<s>" },
        { "id": 4321, "logprob": -8.6875, "text": "Test" },
        { "id": 2009, "logprob": -11.5546875, "text": "request" }
      ],
      "seed": null,
      "tokens": [
        { "id": 363, "logprob": -1.5380859, "special": false, "text": " for" },
        { "id": 847, "logprob": -2.5859375, "special": false, "text": " /" },
        { "id": 2754, "logprob": -2.2695312, "special": false, "text": "api" },
        { "id": 29914, "logprob": -0.03439331, "special": false, "text": "/" },
        { "id": 29894, "logprob": -0.96240234, "special": false, "text": "v" },
        { "id": 29896, "logprob": -0.36694336, "special": false, "text": "1" },
        { "id": 29914, "logprob": -0.013114929, "special": false, "text": "/" },
        { "id": 16418, "logprob": -3.1542969, "special": false, "text": "projects" },
        { "id": 29914, "logprob": -0.43847656, "special": false, "text": "/" },
        { "id": 29896, "logprob": -1.9433594, "special": false, "text": "1" }
      ]
    },
    "generated_text": "for /api/v1/projects/1"
  },
  {
    "details": {
      "best_of_sequences": null,
      "finish_reason": "length",
      "generated_tokens": 10,
      "prefill": [
        { "id": 1, "logprob": null, "text": "<s>" },
        { "id": 4321, "logprob": -8.6875, "text": "Test" },
        { "id": 2009, "logprob": -11.5546875, "text": "request" }
      ],
      "seed": null,
      "tokens": [
        { "id": 363, "logprob": -1.5322266, "special": false, "text": " for" },
        { "id": 847, "logprob": -2.5585938, "special": false, "text": " /" },
        { "id": 2754, "logprob": -2.265625, "special": false, "text": "api" },
        { "id": 29914, "logprob": -0.034088135, "special": false, "text": "/" },
        { "id": 29894, "logprob": -0.96240234, "special": false, "text": "v" },
        { "id": 29896, "logprob": -0.36816406, "special": false, "text": "1" },
        { "id": 29914, "logprob": -0.013191223, "special": false, "text": "/" },
        { "id": 16418, "logprob": -3.15625, "special": false, "text": "projects" },
        { "id": 29914, "logprob": -0.43774414, "special": false, "text": "/" },
        { "id": 29896, "logprob": -1.9443359, "special": false, "text": "1" }
      ]
    },
    "generated_text": "for /api/v1/projects/1"
  },
  {
    "details": {
      "best_of_sequences": null,
      "finish_reason": "length",
      "generated_tokens": 10,
      "prefill": [
        { "id": 1, "logprob": null, "text": "<s>" },
        { "id": 4321, "logprob": -8.6875, "text": "Test" },
        { "id": 2009, "logprob": -11.5546875, "text": "request" }
      ],
      "seed": null,
      "tokens": [
        { "id": 363, "logprob": -1.5322266, "special": false, "text": " for" },
        { "id": 847, "logprob": -2.5585938, "special": false, "text": " /" },
        { "id": 2754, "logprob": -2.265625, "special": false, "text": "api" },
        { "id": 29914, "logprob": -0.034088135, "special": false, "text": "/" },
        { "id": 29894, "logprob": -0.96240234, "special": false, "text": "v" },
        { "id": 29896, "logprob": -0.36816406, "special": false, "text": "1" },
        { "id": 29914, "logprob": -0.013191223, "special": false, "text": "/" },
        { "id": 16418, "logprob": -3.15625, "special": false, "text": "projects" },
        { "id": 29914, "logprob": -0.43774414, "special": false, "text": "/" },
        { "id": 29896, "logprob": -1.9443359, "special": false, "text": "1" }
      ]
    },
    "generated_text": "for /api/v1/projects/1"
  }
]
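The "_load" snapshots hold the responses of several identical requests sent concurrently, all expected to produce the same generation. The real helper is part of integration-tests/conftest.py; the following stand-alone sketch only illustrates the pattern (endpoint, prompt and worker count are assumptions):

```python
# Sketch of a load check: fire the same prompt N times in parallel and
# assert every response decodes to the same text as the first one.
from concurrent.futures import ThreadPoolExecutor

import requests


def generate(prompt: str) -> dict:
    payload = {"inputs": prompt, "parameters": {"max_new_tokens": 10, "details": True}}
    return requests.post("http://127.0.0.1:8080/generate", json=payload, timeout=60).json()


with ThreadPoolExecutor(max_workers=4) as pool:
    responses = list(pool.map(generate, ["Test request"] * 4))

assert len(responses) == 4
assert all(r["generated_text"] == responses[0]["generated_text"] for r in responses)
```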
integration-tests/models/__snapshots__/test_flash_neox.ambr
deleted
100644 → 0
View file @
e71471be
# serializer version: 1
# name: test_flash_neox
dict({
'details': dict({
'best_of_sequences': None,
'finish_reason': <FinishReason.Length: 'length'>,
'generated_tokens': 10,
'prefill': list([
dict({
'id': 50278,
'text': '<|prompter|>',
}),
dict({
'id': 1276,
'text': 'What',
}),
dict({
'id': 310,
'text': ' is',
}),
dict({
'id': 247,
'text': ' a',
}),
dict({
'id': 1167,
'text': ' mem',
}),
dict({
'id': 70,
'text': 'e',
}),
dict({
'id': 13,
'text': ',',
}),
dict({
'id': 285,
'text': ' and',
}),
dict({
'id': 752,
'text': ' what',
}),
dict({
'id': 434,
'text': "'s",
}),
dict({
'id': 253,
'text': ' the',
}),
dict({
'id': 2892,
'text': ' history',
}),
dict({
'id': 3212,
'text': ' behind',
}),
dict({
'id': 436,
'text': ' this',
}),
dict({
'id': 3159,
'text': ' word',
}),
dict({
'id': 32,
'text': '?',
}),
dict({
'id': 0,
'text': '<|endoftext|>',
}),
dict({
'id': 50281,
'text': '<|assistant|>',
}),
]),
'seed': None,
'tokens': list([
dict({
'id': 510,
'special': False,
'text': 'The',
}),
dict({
'id': 3159,
'special': False,
'text': ' word',
}),
dict({
'id': 346,
'special': False,
'text': ' "',
}),
dict({
'id': 6441,
'special': False,
'text': 'mem',
}),
dict({
'id': 70,
'special': False,
'text': 'e',
}),
dict({
'id': 3,
'special': False,
'text': '"',
}),
dict({
'id': 369,
'special': False,
'text': ' was',
}),
dict({
'id': 806,
'special': False,
'text': ' first',
}),
dict({
'id': 908,
'special': False,
'text': ' used',
}),
dict({
'id': 275,
'special': False,
'text': ' in',
}),
]),
}),
'generated_text': 'The word "meme" was first used in',
})
# ---
# name: test_flash_neox_load
list([
dict({
'details': dict({
'best_of_sequences': None,
'finish_reason': <FinishReason.Length: 'length'>,
'generated_tokens': 10,
'prefill': list([
dict({
'id': 50278,
'text': '<|prompter|>',
}),
dict({
'id': 1276,
'text': 'What',
}),
dict({
'id': 310,
'text': ' is',
}),
dict({
'id': 247,
'text': ' a',
}),
dict({
'id': 1167,
'text': ' mem',
}),
dict({
'id': 70,
'text': 'e',
}),
dict({
'id': 13,
'text': ',',
}),
dict({
'id': 285,
'text': ' and',
}),
dict({
'id': 752,
'text': ' what',
}),
dict({
'id': 434,
'text': "'s",
}),
dict({
'id': 253,
'text': ' the',
}),
dict({
'id': 2892,
'text': ' history',
}),
dict({
'id': 3212,
'text': ' behind',
}),
dict({
'id': 436,
'text': ' this',
}),
dict({
'id': 3159,
'text': ' word',
}),
dict({
'id': 32,
'text': '?',
}),
dict({
'id': 0,
'text': '<|endoftext|>',
}),
dict({
'id': 50281,
'text': '<|assistant|>',
}),
]),
'seed': None,
'tokens': list([
dict({
'id': 510,
'special': False,
'text': 'The',
}),
dict({
'id': 3159,
'special': False,
'text': ' word',
}),
dict({
'id': 346,
'special': False,
'text': ' "',
}),
dict({
'id': 6441,
'special': False,
'text': 'mem',
}),
dict({
'id': 70,
'special': False,
'text': 'e',
}),
dict({
'id': 3,
'special': False,
'text': '"',
}),
dict({
'id': 369,
'special': False,
'text': ' was',
}),
dict({
'id': 806,
'special': False,
'text': ' first',
}),
dict({
'id': 908,
'special': False,
'text': ' used',
}),
dict({
'id': 275,
'special': False,
'text': ' in',
}),
]),
}),
'generated_text': 'The word "meme" was first used in',
}),
dict({
'details': dict({
'best_of_sequences': None,
'finish_reason': <FinishReason.Length: 'length'>,
'generated_tokens': 10,
'prefill': list([
dict({
'id': 50278,
'text': '<|prompter|>',
}),
dict({
'id': 1276,
'text': 'What',
}),
dict({
'id': 310,
'text': ' is',
}),
dict({
'id': 247,
'text': ' a',
}),
dict({
'id': 1167,
'text': ' mem',
}),
dict({
'id': 70,
'text': 'e',
}),
dict({
'id': 13,
'text': ',',
}),
dict({
'id': 285,
'text': ' and',
}),
dict({
'id': 752,
'text': ' what',
}),
dict({
'id': 434,
'text': "'s",
}),
dict({
'id': 253,
'text': ' the',
}),
dict({
'id': 2892,
'text': ' history',
}),
dict({
'id': 3212,
'text': ' behind',
}),
dict({
'id': 436,
'text': ' this',
}),
dict({
'id': 3159,
'text': ' word',
}),
dict({
'id': 32,
'text': '?',
}),
dict({
'id': 0,
'text': '<|endoftext|>',
}),
dict({
'id': 50281,
'text': '<|assistant|>',
}),
]),
'seed': None,
'tokens': list([
dict({
'id': 510,
'special': False,
'text': 'The',
}),
dict({
'id': 3159,
'special': False,
'text': ' word',
}),
dict({
'id': 346,
'special': False,
'text': ' "',
}),
dict({
'id': 6441,
'special': False,
'text': 'mem',
}),
dict({
'id': 70,
'special': False,
'text': 'e',
}),
dict({
'id': 3,
'special': False,
'text': '"',
}),
dict({
'id': 369,
'special': False,
'text': ' was',
}),
dict({
'id': 806,
'special': False,
'text': ' first',
}),
dict({
'id': 908,
'special': False,
'text': ' used',
}),
dict({
'id': 275,
'special': False,
'text': ' in',
}),
]),
}),
'generated_text': 'The word "meme" was first used in',
}),
dict({
'details': dict({
'best_of_sequences': None,
'finish_reason': <FinishReason.Length: 'length'>,
'generated_tokens': 10,
'prefill': list([
dict({
'id': 50278,
'text': '<|prompter|>',
}),
dict({
'id': 1276,
'text': 'What',
}),
dict({
'id': 310,
'text': ' is',
}),
dict({
'id': 247,
'text': ' a',
}),
dict({
'id': 1167,
'text': ' mem',
}),
dict({
'id': 70,
'text': 'e',
}),
dict({
'id': 13,
'text': ',',
}),
dict({
'id': 285,
'text': ' and',
}),
dict({
'id': 752,
'text': ' what',
}),
dict({
'id': 434,
'text': "'s",
}),
dict({
'id': 253,
'text': ' the',
}),
dict({
'id': 2892,
'text': ' history',
}),
dict({
'id': 3212,
'text': ' behind',
}),
dict({
'id': 436,
'text': ' this',
}),
dict({
'id': 3159,
'text': ' word',
}),
dict({
'id': 32,
'text': '?',
}),
dict({
'id': 0,
'text': '<|endoftext|>',
}),
dict({
'id': 50281,
'text': '<|assistant|>',
}),
]),
'seed': None,
'tokens': list([
dict({
'id': 510,
'special': False,
'text': 'The',
}),
dict({
'id': 3159,
'special': False,
'text': ' word',
}),
dict({
'id': 346,
'special': False,
'text': ' "',
}),
dict({
'id': 6441,
'special': False,
'text': 'mem',
}),
dict({
'id': 70,
'special': False,
'text': 'e',
}),
dict({
'id': 3,
'special': False,
'text': '"',
}),
dict({
'id': 369,
'special': False,
'text': ' was',
}),
dict({
'id': 806,
'special': False,
'text': ' first',
}),
dict({
'id': 908,
'special': False,
'text': ' used',
}),
dict({
'id': 275,
'special': False,
'text': ' in',
}),
]),
}),
'generated_text': 'The word "meme" was first used in',
}),
dict({
'details': dict({
'best_of_sequences': None,
'finish_reason': <FinishReason.Length: 'length'>,
'generated_tokens': 10,
'prefill': list([
dict({
'id': 50278,
'text': '<|prompter|>',
}),
dict({
'id': 1276,
'text': 'What',
}),
dict({
'id': 310,
'text': ' is',
}),
dict({
'id': 247,
'text': ' a',
}),
dict({
'id': 1167,
'text': ' mem',
}),
dict({
'id': 70,
'text': 'e',
}),
dict({
'id': 13,
'text': ',',
}),
dict({
'id': 285,
'text': ' and',
}),
dict({
'id': 752,
'text': ' what',
}),
dict({
'id': 434,
'text': "'s",
}),
dict({
'id': 253,
'text': ' the',
}),
dict({
'id': 2892,
'text': ' history',
}),
dict({
'id': 3212,
'text': ' behind',
}),
dict({
'id': 436,
'text': ' this',
}),
dict({
'id': 3159,
'text': ' word',
}),
dict({
'id': 32,
'text': '?',
}),
dict({
'id': 0,
'text': '<|endoftext|>',
}),
dict({
'id': 50281,
'text': '<|assistant|>',
}),
]),
'seed': None,
'tokens': list([
dict({
'id': 510,
'special': False,
'text': 'The',
}),
dict({
'id': 3159,
'special': False,
'text': ' word',
}),
dict({
'id': 346,
'special': False,
'text': ' "',
}),
dict({
'id': 6441,
'special': False,
'text': 'mem',
}),
dict({
'id': 70,
'special': False,
'text': 'e',
}),
dict({
'id': 3,
'special': False,
'text': '"',
}),
dict({
'id': 369,
'special': False,
'text': ' was',
}),
dict({
'id': 806,
'special': False,
'text': ' first',
}),
dict({
'id': 908,
'special': False,
'text': ' used',
}),
dict({
'id': 275,
'special': False,
'text': ' in',
}),
]),
}),
'generated_text': 'The word "meme" was first used in',
}),
])
# ---
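The deleted .ambr entries above store only token ids, texts and the special flag, while the new JSON snapshots also keep per-token logprobs. Serializing a client response into that JSON layout needs nothing beyond the standard library; a minimal sketch, assuming the client returns a pydantic model exposing .dict():

```python
import json


def dump_snapshot(response, path: str) -> None:
    # Write the response (prefill, tokens, logprobs, generated_text) as a
    # stable, sorted JSON file that can serve as a snapshot.
    with open(path, "w") as f:
        json.dump(response.dict(), f, indent=2, sort_keys=True)
```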
integration-tests/models/__snapshots__/test_flash_neox/test_flash_neox.json
0 → 100644
View file @
dbdc587d
{
  "details": {
    "best_of_sequences": null,
    "finish_reason": "length",
    "generated_tokens": 10,
    "prefill": [
      { "id": 50278, "logprob": null, "text": "<|prompter|>" },
      { "id": 1276, "logprob": -8.03125, "text": "What" },
      { "id": 310, "logprob": -5.421875, "text": " is" },
      { "id": 247, "logprob": -2.1601562, "text": " a" },
      { "id": 1167, "logprob": -5.4609375, "text": " mem" },
      { "id": 70, "logprob": -0.005657196, "text": "e" },
      { "id": 13, "logprob": -7.28125, "text": "," },
      { "id": 285, "logprob": -0.2980957, "text": " and" },
      { "id": 752, "logprob": -2.1679688, "text": " what" },
      { "id": 434, "logprob": -5.6210938, "text": "'s" },
      { "id": 253, "logprob": -0.81103516, "text": " the" },
      { "id": 2892, "logprob": -6.6640625, "text": " history" },
      { "id": 3212, "logprob": -2.265625, "text": " behind" },
      { "id": 436, "logprob": -11.5078125, "text": " this" },
      { "id": 3159, "logprob": -2.1582031, "text": " word" },
      { "id": 32, "logprob": -0.008720398, "text": "?" },
      { "id": 0, "logprob": -2.4726562, "text": "<|endoftext|>" },
      { "id": 50281, "logprob": -18.265625, "text": "<|assistant|>" }
    ],
    "seed": null,
    "tokens": [
      { "id": 510, "logprob": -0.63183594, "special": false, "text": "The" },
      { "id": 3159, "logprob": -0.5390625, "special": false, "text": " word" },
      { "id": 346, "logprob": -0.045684814, "special": false, "text": " \"" },
      { "id": 6441, "logprob": -0.002090454, "special": false, "text": "mem" },
      { "id": 70, "logprob": -1.3589859e-05, "special": false, "text": "e" },
      { "id": 3, "logprob": -0.0009455681, "special": false, "text": "\"" },
      { "id": 369, "logprob": -0.088012695, "special": false, "text": " was" },
      { "id": 806, "logprob": -0.12585449, "special": false, "text": " first" },
      { "id": 908, "logprob": -0.017196655, "special": false, "text": " used" },
      { "id": 275, "logprob": -0.49731445, "special": false, "text": " in" }
    ]
  },
  "generated_text": "The word \"meme\" was first used in"
}
integration-tests/models/__snapshots__/test_flash_neox/test_flash_neox_load.json
0 → 100644
View file @
dbdc587d
[
  {
    "details": {
      "best_of_sequences": null,
      "finish_reason": "length",
      "generated_tokens": 10,
      "prefill": [
        { "id": 50278, "logprob": null, "text": "<|prompter|>" },
        { "id": 1276, "logprob": -8.03125, "text": "What" },
        { "id": 310, "logprob": -5.421875, "text": " is" },
        { "id": 247, "logprob": -2.1601562, "text": " a" },
        { "id": 1167, "logprob": -5.4609375, "text": " mem" },
        { "id": 70, "logprob": -0.005657196, "text": "e" },
        { "id": 13, "logprob": -7.28125, "text": "," },
        { "id": 285, "logprob": -0.2980957, "text": " and" },
        { "id": 752, "logprob": -2.1679688, "text": " what" },
        { "id": 434, "logprob": -5.6210938, "text": "'s" },
        { "id": 253, "logprob": -0.81103516, "text": " the" },
        { "id": 2892, "logprob": -6.6640625, "text": " history" },
        { "id": 3212, "logprob": -2.265625, "text": " behind" },
        { "id": 436, "logprob": -11.5078125, "text": " this" },
        { "id": 3159, "logprob": -2.1582031, "text": " word" },
        { "id": 32, "logprob": -0.008720398, "text": "?" },
        { "id": 0, "logprob": -2.4726562, "text": "<|endoftext|>" },
        { "id": 50281, "logprob": -18.265625, "text": "<|assistant|>" }
      ],
      "seed": null,
      "tokens": [
        { "id": 510, "logprob": -0.63183594, "special": false, "text": "The" },
        { "id": 3159, "logprob": -0.5488281, "special": false, "text": " word" },
        { "id": 346, "logprob": -0.045684814, "special": false, "text": " \"" },
        { "id": 6441, "logprob": -0.00207901, "special": false, "text": "mem" },
        { "id": 70, "logprob": -1.335144e-05, "special": false, "text": "e" },
        { "id": 3, "logprob": -0.00097227097, "special": false, "text": "\"" },
        { "id": 369, "logprob": -0.0892334, "special": false, "text": " was" },
        { "id": 806, "logprob": -0.12463379, "special": false, "text": " first" },
        { "id": 908, "logprob": -0.01737976, "special": false, "text": " used" },
        { "id": 275, "logprob": -0.50341797, "special": false, "text": " in" }
      ]
    },
    "generated_text": "The word \"meme\" was first used in"
  },
  {
    "details": {
      "best_of_sequences": null,
      "finish_reason": "length",
      "generated_tokens": 10,
      "prefill": [
        { "id": 50278, "logprob": null, "text": "<|prompter|>" },
        { "id": 1276, "logprob": -8.03125, "text": "What" },
        { "id": 310, "logprob": -5.421875, "text": " is" },
        { "id": 247, "logprob": -2.1601562, "text": " a" },
        { "id": 1167, "logprob": -5.4609375, "text": " mem" },
        { "id": 70, "logprob": -0.005657196, "text": "e" },
        { "id": 13, "logprob": -7.28125, "text": "," },
        { "id": 285, "logprob": -0.2980957, "text": " and" },
        { "id": 752, "logprob": -2.1679688, "text": " what" },
        { "id": 434, "logprob": -5.6210938, "text": "'s" },
        { "id": 253, "logprob": -0.81103516, "text": " the" },
        { "id": 2892, "logprob": -6.6640625, "text": " history" },
        { "id": 3212, "logprob": -2.265625, "text": " behind" },
        { "id": 436, "logprob": -11.5078125, "text": " this" },
        { "id": 3159, "logprob": -2.1582031, "text": " word" },
        { "id": 32, "logprob": -0.008720398, "text": "?" },
        { "id": 0, "logprob": -2.4726562, "text": "<|endoftext|>" },
        { "id": 50281, "logprob": -18.265625, "text": "<|assistant|>" }
      ],
      "seed": null,
      "tokens": [
        { "id": 510, "logprob": -0.63183594, "special": false, "text": "The" },
        { "id": 3159, "logprob": -0.5488281, "special": false, "text": " word" },
        { "id": 346, "logprob": -0.045684814, "special": false, "text": " \"" },
        { "id": 6441, "logprob": -0.00207901, "special": false, "text": "mem" },
        { "id": 70, "logprob": -1.335144e-05, "special": false, "text": "e" },
        { "id": 3, "logprob": -0.00097227097, "special": false, "text": "\"" },
        { "id": 369, "logprob": -0.0892334, "special": false, "text": " was" },
        { "id": 806, "logprob": -0.12463379, "special": false, "text": " first" },
        { "id": 908, "logprob": -0.01737976, "special": false, "text": " used" },
        { "id": 275, "logprob": -0.50341797, "special": false, "text": " in" }
      ]
    },
    "generated_text": "The word \"meme\" was first used in"
  },
  {
    "details": {
      "best_of_sequences": null,
      "finish_reason": "length",
      "generated_tokens": 10,
      "prefill": [
        { "id": 50278, "logprob": null, "text": "<|prompter|>" },
        { "id": 1276, "logprob": -8.03125, "text": "What" },
        { "id": 310, "logprob": -5.421875, "text": " is" },
        { "id": 247, "logprob": -2.1601562, "text": " a" },
        { "id": 1167, "logprob": -5.4609375, "text": " mem" },
        { "id": 70, "logprob": -0.005657196, "text": "e" },
        { "id": 13, "logprob": -7.28125, "text": "," },
        { "id": 285, "logprob": -0.2980957, "text": " and" },
        { "id": 752, "logprob": -2.1679688, "text": " what" },
        { "id": 434, "logprob": -5.6210938, "text": "'s" },
        { "id": 253, "logprob": -0.81103516, "text": " the" },
        { "id": 2892, "logprob": -6.6640625, "text": " history" },
        { "id": 3212, "logprob": -2.265625, "text": " behind" },
        { "id": 436, "logprob": -11.5078125, "text": " this" },
        { "id": 3159, "logprob": -2.1582031, "text": " word" },
        { "id": 32, "logprob": -0.008720398, "text": "?" },
        { "id": 0, "logprob": -2.4726562, "text": "<|endoftext|>" },
        { "id": 50281, "logprob": -18.265625, "text": "<|assistant|>" }
      ],
      "seed": null,
      "tokens": [
        { "id": 510, "logprob": -0.63183594, "special": false, "text": "The" },
        { "id": 3159, "logprob": -0.5488281, "special": false, "text": " word" },
        { "id": 346, "logprob": -0.045684814, "special": false, "text": " \"" },
        { "id": 6441, "logprob": -0.00207901, "special": false, "text": "mem" },
        { "id": 70, "logprob": -1.335144e-05, "special": false, "text": "e" },
        { "id": 3, "logprob": -0.00097227097, "special": false, "text": "\"" },
        { "id": 369, "logprob": -0.0892334, "special": false, "text": " was" },
        { "id": 806, "logprob": -0.12463379, "special": false, "text": " first" },
        { "id": 908, "logprob": -0.01737976, "special": false, "text": " used" },
        { "id": 275, "logprob": -0.50341797, "special": false, "text": " in" }
      ]
    },
    "generated_text": "The word \"meme\" was first used in"
  },
  {
    "details": {
      "best_of_sequences": null,
      "finish_reason": "length",
      "generated_tokens": 10,
      "prefill": [
        { "id": 50278, "logprob": null, "text": "<|prompter|>" },
        { "id": 1276, "logprob": -8.03125, "text": "What" },
        { "id": 310, "logprob": -5.421875, "text": " is" },
        { "id": 247, "logprob": -2.1601562, "text": " a" },
        { "id": 1167, "logprob": -5.4609375, "text": " mem" },
        { "id": 70, "logprob": -0.005657196, "text": "e" },
        { "id": 13, "logprob": -7.28125, "text": "," },
        { "id": 285, "logprob": -0.2980957, "text": " and" },
        { "id": 752, "logprob": -2.1679688, "text": " what" },
        { "id": 434, "logprob": -5.6210938, "text": "'s" },
        { "id": 253, "logprob": -0.81103516, "text": " the" },
        { "id": 2892, "logprob": -6.6640625, "text": " history" },
        { "id": 3212, "logprob": -2.265625, "text": " behind" },
        { "id": 436, "logprob": -11.5078125, "text": " this" },
        { "id": 3159, "logprob": -2.1582031, "text": " word" },
        { "id": 32, "logprob": -0.008720398, "text": "?" },
        { "id": 0, "logprob": -2.4726562, "text": "<|endoftext|>" },
        { "id": 50281, "logprob": -18.265625, "text": "<|assistant|>" }
      ],
      "seed": null,
      "tokens": [
        { "id": 510, "logprob": -0.63183594, "special": false, "text": "The" },
        { "id": 3159, "logprob": -0.5488281, "special": false, "text": " word" },
        { "id": 346, "logprob": -0.045684814, "special": false, "text": " \"" },
        { "id": 6441, "logprob": -0.00207901, "special": false, "text": "mem" },
        { "id": 70, "logprob": -1.335144e-05, "special": false, "text": "e" },
        { "id": 3, "logprob": -0.00097227097, "special": false, "text": "\"" },
        { "id": 369, "logprob": -0.0892334, "special": false, "text": " was" },
        { "id": 806, "logprob": -0.12463379, "special": false, "text": " first" },
        { "id": 908, "logprob": -0.01737976, "special": false, "text": " used" },
        { "id": 275, "logprob": -0.50341797, "special": false, "text": " in" }
      ]
    },
    "generated_text": "The word \"meme\" was first used in"
  }
]
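Comparing this load snapshot with the single-request test_flash_neox.json above shows why exact float equality is too strict for snapshot checks: the logprob of " word" is -0.5390625 in one run and -0.5488281 in the other, even though the decoded text is identical. A tolerance-based comparison absorbs that drift (the tolerance value here is illustrative, not the one the suite uses):

```python
import math

# " word" logprob from the single-request snapshot vs. the load snapshot.
single, load = -0.5390625, -0.5488281
assert single != load                             # exact equality would fail
assert math.isclose(single, load, rel_tol=5e-2)   # tolerant comparison passes
```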
integration-tests/models/__snapshots__/test_flash_santacoder.ambr
deleted
100644 → 0
View file @
e71471be
# serializer version: 1
# name: test_flash_santacoder
dict({
'details': dict({
'best_of_sequences': None,
'finish_reason': <FinishReason.Length: 'length'>,
'generated_tokens': 10,
'prefill': list([
dict({
'id': 563,
'text': 'def',
}),
dict({
'id': 942,
'text': ' print',
}),
dict({
'id': 62,
'text': '_',
}),
dict({
'id': 7196,
'text': 'hello',
}),
]),
'seed': None,
'tokens': list([
dict({
'id': 1241,
'special': False,
'text': '():',
}),
dict({
'id': 258,
'special': False,
'text': '''
''',
}),
dict({
'id': 942,
'special': False,
'text': ' print',
}),
dict({
'id': 372,
'special': False,
'text': '("',
}),
dict({
'id': 7371,
'special': False,
'text': 'Hello',
}),
dict({
'id': 9956,
'special': False,
'text': ' World',
}),
dict({
'id': 8657,
'special': False,
'text': '!")',
}),
dict({
'id': 185,
'special': False,
'text': '''
''',
}),
dict({
'id': 185,
'special': False,
'text': '''
''',
}),
dict({
'id': 1018,
'special': False,
'text': 'print',
}),
]),
}),
'generated_text': '''
():
print("Hello World!")
print
''',
})
# ---
# name: test_flash_santacoder_load
list([
dict({
'details': dict({
'best_of_sequences': None,
'finish_reason': <FinishReason.Length: 'length'>,
'generated_tokens': 10,
'prefill': list([
dict({
'id': 563,
'text': 'def',
}),
dict({
'id': 942,
'text': ' print',
}),
dict({
'id': 62,
'text': '_',
}),
dict({
'id': 7196,
'text': 'hello',
}),
]),
'seed': None,
'tokens': list([
dict({
'id': 1241,
'special': False,
'text': '():',
}),
dict({
'id': 258,
'special': False,
'text': '''
''',
}),
dict({
'id': 942,
'special': False,
'text': ' print',
}),
dict({
'id': 372,
'special': False,
'text': '("',
}),
dict({
'id': 7371,
'special': False,
'text': 'Hello',
}),
dict({
'id': 9956,
'special': False,
'text': ' World',
}),
dict({
'id': 8657,
'special': False,
'text': '!")',
}),
dict({
'id': 185,
'special': False,
'text': '''
''',
}),
dict({
'id': 185,
'special': False,
'text': '''
''',
}),
dict({
'id': 1018,
'special': False,
'text': 'print',
}),
]),
}),
'generated_text': '''
():
print("Hello World!")
print
''',
}),
dict({
'details': dict({
'best_of_sequences': None,
'finish_reason': <FinishReason.Length: 'length'>,
'generated_tokens': 10,
'prefill': list([
dict({
'id': 563,
'text': 'def',
}),
dict({
'id': 942,
'text': ' print',
}),
dict({
'id': 62,
'text': '_',
}),
dict({
'id': 7196,
'text': 'hello',
}),
]),
'seed': None,
'tokens': list([
dict({
'id': 1241,
'special': False,
'text': '():',
}),
dict({
'id': 258,
'special': False,
'text': '''
''',
}),
dict({
'id': 942,
'special': False,
'text': ' print',
}),
dict({
'id': 372,
'special': False,
'text': '("',
}),
dict({
'id': 7371,
'special': False,
'text': 'Hello',
}),
dict({
'id': 9956,
'special': False,
'text': ' World',
}),
dict({
'id': 8657,
'special': False,
'text': '!")',
}),
dict({
'id': 185,
'special': False,
'text': '''
''',
}),
dict({
'id': 185,
'special': False,
'text': '''
''',
}),
dict({
'id': 1018,
'special': False,
'text': 'print',
}),
]),
}),
'generated_text': '''
():
print("Hello World!")
print
''',
}),
dict({
'details': dict({
'best_of_sequences': None,
'finish_reason': <FinishReason.Length: 'length'>,
'generated_tokens': 10,
'prefill': list([
dict({
'id': 563,
'text': 'def',
}),
dict({
'id': 942,
'text': ' print',
}),
dict({
'id': 62,
'text': '_',
}),
dict({
'id': 7196,
'text': 'hello',
}),
]),
'seed': None,
'tokens': list([
dict({
'id': 1241,
'special': False,
'text': '():',
}),
dict({
'id': 258,
'special': False,
'text': '''
''',
}),
dict({
'id': 942,
'special': False,
'text': ' print',
}),
dict({
'id': 372,
'special': False,
'text': '("',
}),
dict({
'id': 7371,
'special': False,
'text': 'Hello',
}),
dict({
'id': 9956,
'special': False,
'text': ' World',
}),
dict({
'id': 8657,
'special': False,
'text': '!")',
}),
dict({
'id': 185,
'special': False,
'text': '''
''',
}),
dict({
'id': 185,
'special': False,
'text': '''
''',
}),
dict({
'id': 1018,
'special': False,
'text': 'print',
}),
]),
}),
'generated_text': '''
():
print("Hello World!")
print
''',
}),
dict({
'details': dict({
'best_of_sequences': None,
'finish_reason': <FinishReason.Length: 'length'>,
'generated_tokens': 10,
'prefill': list([
dict({
'id': 563,
'text': 'def',
}),
dict({
'id': 942,
'text': ' print',
}),
dict({
'id': 62,
'text': '_',
}),
dict({
'id': 7196,
'text': 'hello',
}),
]),
'seed': None,
'tokens': list([
dict({
'id': 1241,
'special': False,
'text': '():',
}),
dict({
'id': 258,
'special': False,
'text': '''
''',
}),
dict({
'id': 942,
'special': False,
'text': ' print',
}),
dict({
'id': 372,
'special': False,
'text': '("',
}),
dict({
'id': 7371,
'special': False,
'text': 'Hello',
}),
dict({
'id': 9956,
'special': False,
'text': ' World',
}),
dict({
'id': 8657,
'special': False,
'text': '!")',
}),
dict({
'id': 185,
'special': False,
'text': '''
''',
}),
dict({
'id': 185,
'special': False,
'text': '''
''',
}),
dict({
'id': 1018,
'special': False,
'text': 'print',
}),
]),
}),
'generated_text': '''
():
print("Hello World!")
print
''',
}),
])
# ---