test_bloom_560m.py
import pytest


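# Launch bigscience/bloom-560m once for this test module; the handle is torn
# down when the module's tests finish.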
@pytest.fixture(scope="module")
def bloom_560_handle(launcher):
    with launcher("bigscience/bloom-560m") as handle:
        yield handle


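# Wait for the launched model to become healthy before handing back its client.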
@pytest.fixture(scope="module")
async def bloom_560(bloom_560_handle):
    await bloom_560_handle.health(240)
    return bloom_560_handle.client


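# Basic generation test: 10 new tokens with nucleus sampling and a fixed seed,
# compared against the recorded snapshot.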
@pytest.mark.asyncio
async def test_bloom_560m(bloom_560, response_snapshot):
    response = await bloom_560.generate(
        "Pour déguster un ortolan, il faut tout d'abord",
        max_new_tokens=10,
        top_p=0.9,
        decoder_input_details=True,
        seed=0,
    )

    assert response.details.generated_tokens == 10
    assert response == response_snapshot


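# Exercise the full set of generation parameters in a single seeded request.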
@pytest.mark.asyncio
async def test_bloom_560m_all_params(bloom_560, response_snapshot):
    response = await bloom_560.generate(
        "Pour déguster un ortolan, il faut tout d'abord",
        max_new_tokens=10,
        repetition_penalty=1.2,
        return_full_text=True,
        stop_sequences=["test"],
        temperature=0.5,
        top_p=0.9,
        top_k=10,
        truncate=5,
        typical_p=0.9,
        watermark=True,
        decoder_input_details=True,
        seed=0,
    )

    assert response.details.generated_tokens == 10
    assert response == response_snapshot


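# Issue 4 identical requests through generate_load and check that every
# response matches the first one and the stored snapshot.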
@pytest.mark.asyncio
async def test_bloom_560m_load(bloom_560, generate_load, response_snapshot):
    responses = await generate_load(
        bloom_560,
        "Pour déguster un ortolan, il faut tout d'abord",
        max_new_tokens=10,
        n=4,
    )

    assert len(responses) == 4
    assert all([r.generated_text == responses[0].generated_text for r in responses])

    assert responses == response_snapshot