ModelZoo / Qwen2.5_pytorch / Commits

Commit 802ef8b7, authored Oct 11, 2024 by luopl
Commit message: init
Pipeline #1743 failed with stages in 0 seconds
Changes: 263 | Pipelines: 1

Showing 20 changed files with 3403 additions and 0 deletions (+3403, -0)
LLaMA-Factory/src/llamafactory/__init__.py           +46   -0
LLaMA-Factory/src/llamafactory/api/__init__.py        +0   -0
LLaMA-Factory/src/llamafactory/api/app.py           +134   -0
LLaMA-Factory/src/llamafactory/api/chat.py          +237   -0
LLaMA-Factory/src/llamafactory/api/common.py         +34   -0
LLaMA-Factory/src/llamafactory/api/protocol.py      +153   -0
LLaMA-Factory/src/llamafactory/chat/__init__.py      +19   -0
LLaMA-Factory/src/llamafactory/chat/base_engine.py  +102   -0
LLaMA-Factory/src/llamafactory/chat/chat_model.py   +187   -0
LLaMA-Factory/src/llamafactory/chat/hf_engine.py    +343   -0
LLaMA-Factory/src/llamafactory/chat/vllm_engine.py  +230   -0
LLaMA-Factory/src/llamafactory/cli.py               +121   -0
LLaMA-Factory/src/llamafactory/data/__init__.py      +37   -0
LLaMA-Factory/src/llamafactory/data/aligner.py      +258   -0
LLaMA-Factory/src/llamafactory/data/collator.py     +189   -0
LLaMA-Factory/src/llamafactory/data/data_utils.py    +92   -0
LLaMA-Factory/src/llamafactory/data/formatter.py    +148   -0
LLaMA-Factory/src/llamafactory/data/loader.py       +292   -0
LLaMA-Factory/src/llamafactory/data/mm_plugin.py    +627   -0
LLaMA-Factory/src/llamafactory/data/parser.py       +154   -0
LLaMA-Factory/src/llamafactory/__init__.py  (new file, mode 100644)
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Efficient fine-tuning of large language models.

Level:
  api, webui > chat, eval, train > data, model > hparams > extras

Dependency graph:
  main:
    transformers>=4.41.2,<=4.45.2
    datasets>=2.16.0,<=2.21.0
    accelerate>=0.30.1,<=0.34.2
    peft>=0.11.1,<=0.12.0
    trl>=0.8.6,<=0.9.6
  attention:
    transformers>=4.42.4 (gemma+fa2)
  longlora:
    transformers>=4.41.2,<=4.45.2
  packing:
    transformers>=4.41.2,<=4.45.2

Disable version checking: DISABLE_VERSION_CHECK=1
Enable VRAM recording: RECORD_VRAM=1
Force check imports: FORCE_CHECK_IMPORTS=1
Force using torchrun: FORCE_TORCHRUN=1
Set logging verbosity: LLAMAFACTORY_VERBOSITY=WARN
Use modelscope: USE_MODELSCOPE_HUB=1
"""

from .extras.env import VERSION


__version__ = VERSION
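
Not part of the commit: a minimal sketch of setting the environment flags listed in the docstring above before the package is imported. The flag names come from the docstring; the script context itself is assumed.

import os

# Flags documented in llamafactory/__init__.py; they are read when the package runs.
os.environ["DISABLE_VERSION_CHECK"] = "1"      # skip dependency version checking
os.environ["LLAMAFACTORY_VERBOSITY"] = "WARN"  # reduce logging verbosity

import llamafactory  # noqa: E402

print(llamafactory.__version__)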
LLaMA-Factory/src/llamafactory/api/__init__.py  (new file, mode 100644, empty)
LLaMA-Factory/src/llamafactory/api/app.py  (new file, mode 100644)
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import os
from contextlib import asynccontextmanager
from functools import partial
from typing import Optional

from typing_extensions import Annotated

from ..chat import ChatModel
from ..extras.misc import torch_gc
from ..extras.packages import is_fastapi_available, is_starlette_available, is_uvicorn_available
from .chat import (
    create_chat_completion_response,
    create_score_evaluation_response,
    create_stream_chat_completion_response,
)
from .protocol import (
    ChatCompletionRequest,
    ChatCompletionResponse,
    ModelCard,
    ModelList,
    ScoreEvaluationRequest,
    ScoreEvaluationResponse,
)


if is_fastapi_available():
    from fastapi import Depends, FastAPI, HTTPException, status
    from fastapi.middleware.cors import CORSMiddleware
    from fastapi.security.http import HTTPAuthorizationCredentials, HTTPBearer


if is_starlette_available():
    from sse_starlette import EventSourceResponse


if is_uvicorn_available():
    import uvicorn


async def sweeper() -> None:
    while True:
        torch_gc()
        await asyncio.sleep(300)


@asynccontextmanager
async def lifespan(app: "FastAPI", chat_model: "ChatModel"):  # collects GPU memory
    if chat_model.engine_type == "huggingface":
        asyncio.create_task(sweeper())

    yield
    torch_gc()


def create_app(chat_model: "ChatModel") -> "FastAPI":
    root_path = os.environ.get("FASTAPI_ROOT_PATH", "")
    app = FastAPI(lifespan=partial(lifespan, chat_model=chat_model), root_path=root_path)
    app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )
    api_key = os.environ.get("API_KEY", None)
    security = HTTPBearer(auto_error=False)

    async def verify_api_key(auth: Annotated[Optional[HTTPAuthorizationCredentials], Depends(security)]):
        if api_key and (auth is None or auth.credentials != api_key):
            raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid API key.")

    @app.get(
        "/v1/models",
        response_model=ModelList,
        status_code=status.HTTP_200_OK,
        dependencies=[Depends(verify_api_key)],
    )
    async def list_models():
        model_card = ModelCard(id=os.environ.get("API_MODEL_NAME", "gpt-3.5-turbo"))
        return ModelList(data=[model_card])

    @app.post(
        "/v1/chat/completions",
        response_model=ChatCompletionResponse,
        status_code=status.HTTP_200_OK,
        dependencies=[Depends(verify_api_key)],
    )
    async def create_chat_completion(request: ChatCompletionRequest):
        if not chat_model.engine.can_generate:
            raise HTTPException(status_code=status.HTTP_405_METHOD_NOT_ALLOWED, detail="Not allowed")

        if request.stream:
            generate = create_stream_chat_completion_response(request, chat_model)
            return EventSourceResponse(generate, media_type="text/event-stream")
        else:
            return await create_chat_completion_response(request, chat_model)

    @app.post(
        "/v1/score/evaluation",
        response_model=ScoreEvaluationResponse,
        status_code=status.HTTP_200_OK,
        dependencies=[Depends(verify_api_key)],
    )
    async def create_score_evaluation(request: ScoreEvaluationRequest):
        if chat_model.engine.can_generate:
            raise HTTPException(status_code=status.HTTP_405_METHOD_NOT_ALLOWED, detail="Not allowed")

        return await create_score_evaluation_response(request, chat_model)

    return app


def run_api() -> None:
    chat_model = ChatModel()
    app = create_app(chat_model)
    api_host = os.environ.get("API_HOST", "0.0.0.0")
    api_port = int(os.environ.get("API_PORT", "8000"))
    print("Visit http://localhost:{}/docs for API document.".format(api_port))
    uvicorn.run(app, host=api_host, port=api_port)
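
Not part of the commit: a usage sketch for the server above. It assumes run_api() is listening on the default host and port (0.0.0.0:8000) with no API_KEY set; the request and response fields follow the protocol module included later in this commit.

import requests

payload = {
    "model": "test",  # any string; /v1/models reports API_MODEL_NAME (default "gpt-3.5-turbo")
    "messages": [{"role": "user", "content": "Hello!"}],
    "temperature": 0.7,
    "stream": False,
}
resp = requests.post("http://localhost:8000/v1/chat/completions", json=payload, timeout=300)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])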
LLaMA-Factory/src/llamafactory/api/chat.py  (new file, mode 100644)
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import io
import json
import os
import re
import uuid
from typing import TYPE_CHECKING, AsyncGenerator, Dict, List, Optional, Tuple

from ..data import Role as DataRole
from ..extras.logging import get_logger
from ..extras.packages import is_fastapi_available, is_pillow_available, is_requests_available
from .common import dictify, jsonify
from .protocol import (
    ChatCompletionMessage,
    ChatCompletionResponse,
    ChatCompletionResponseChoice,
    ChatCompletionResponseUsage,
    ChatCompletionStreamResponse,
    ChatCompletionStreamResponseChoice,
    Finish,
    Function,
    FunctionCall,
    Role,
    ScoreEvaluationResponse,
)


if is_fastapi_available():
    from fastapi import HTTPException, status


if is_pillow_available():
    from PIL import Image


if is_requests_available():
    import requests


if TYPE_CHECKING:
    from ..chat import ChatModel
    from ..data.mm_plugin import ImageInput
    from .protocol import ChatCompletionRequest, ScoreEvaluationRequest


logger = get_logger(__name__)
ROLE_MAPPING = {
    Role.USER: DataRole.USER.value,
    Role.ASSISTANT: DataRole.ASSISTANT.value,
    Role.SYSTEM: DataRole.SYSTEM.value,
    Role.FUNCTION: DataRole.FUNCTION.value,
    Role.TOOL: DataRole.OBSERVATION.value,
}


def _process_request(
    request: "ChatCompletionRequest",
) -> Tuple[List[Dict[str, str]], Optional[str], Optional[str], Optional["ImageInput"]]:
    logger.info("==== request ====\n{}".format(json.dumps(dictify(request), indent=2, ensure_ascii=False)))

    if len(request.messages) == 0:
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid length")

    if request.messages[0].role == Role.SYSTEM:
        system = request.messages.pop(0).content
    else:
        system = None

    if len(request.messages) % 2 == 0:
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Only supports u/a/u/a/u...")

    input_messages = []
    image = None
    for i, message in enumerate(request.messages):
        if i % 2 == 0 and message.role not in [Role.USER, Role.TOOL]:
            raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid role")
        elif i % 2 == 1 and message.role not in [Role.ASSISTANT, Role.FUNCTION]:
            raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid role")

        if message.role == Role.ASSISTANT and isinstance(message.tool_calls, list) and len(message.tool_calls):
            tool_calls = [
                {"name": tool_call.function.name, "arguments": tool_call.function.arguments}
                for tool_call in message.tool_calls
            ]
            content = json.dumps(tool_calls, ensure_ascii=False)
            input_messages.append({"role": ROLE_MAPPING[Role.FUNCTION], "content": content})
        elif isinstance(message.content, list):
            for input_item in message.content:
                if input_item.type == "text":
                    input_messages.append({"role": ROLE_MAPPING[message.role], "content": input_item.text})
                else:
                    image_url = input_item.image_url.url
                    if re.match(r"^data:image\/(png|jpg|jpeg|gif|bmp);base64,(.+)$", image_url):  # base64 image
                        image_stream = io.BytesIO(base64.b64decode(image_url.split(",", maxsplit=1)[1]))
                    elif os.path.isfile(image_url):  # local file
                        image_stream = open(image_url, "rb")
                    else:  # web uri
                        image_stream = requests.get(image_url, stream=True).raw

                    image = Image.open(image_stream).convert("RGB")
        else:
            input_messages.append({"role": ROLE_MAPPING[message.role], "content": message.content})

    tool_list = request.tools
    if isinstance(tool_list, list) and len(tool_list):
        try:
            tools = json.dumps([dictify(tool.function) for tool in tool_list], ensure_ascii=False)
        except json.JSONDecodeError:
            raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid tools")
    else:
        tools = None

    return input_messages, system, tools, image


def _create_stream_chat_completion_chunk(
    completion_id: str,
    model: str,
    delta: "ChatCompletionMessage",
    index: Optional[int] = 0,
    finish_reason: Optional["Finish"] = None,
) -> str:
    choice_data = ChatCompletionStreamResponseChoice(index=index, delta=delta, finish_reason=finish_reason)
    chunk = ChatCompletionStreamResponse(id=completion_id, model=model, choices=[choice_data])
    return jsonify(chunk)


async def create_chat_completion_response(
    request: "ChatCompletionRequest", chat_model: "ChatModel"
) -> "ChatCompletionResponse":
    completion_id = "chatcmpl-{}".format(uuid.uuid4().hex)
    input_messages, system, tools, image = _process_request(request)
    responses = await chat_model.achat(
        input_messages,
        system,
        tools,
        image,
        do_sample=request.do_sample,
        temperature=request.temperature,
        top_p=request.top_p,
        max_new_tokens=request.max_tokens,
        num_return_sequences=request.n,
        stop=request.stop,
    )

    prompt_length, response_length = 0, 0
    choices = []
    for i, response in enumerate(responses):
        if tools:
            result = chat_model.engine.template.extract_tool(response.response_text)
        else:
            result = response.response_text

        if isinstance(result, list):
            tool_calls = []
            for tool in result:
                function = Function(name=tool[0], arguments=tool[1])
                tool_calls.append(FunctionCall(id="call_{}".format(uuid.uuid4().hex), function=function))

            response_message = ChatCompletionMessage(role=Role.ASSISTANT, tool_calls=tool_calls)
            finish_reason = Finish.TOOL
        else:
            response_message = ChatCompletionMessage(role=Role.ASSISTANT, content=result)
            finish_reason = Finish.STOP if response.finish_reason == "stop" else Finish.LENGTH

        choices.append(ChatCompletionResponseChoice(index=i, message=response_message, finish_reason=finish_reason))
        prompt_length = response.prompt_length
        response_length += response.response_length

    usage = ChatCompletionResponseUsage(
        prompt_tokens=prompt_length,
        completion_tokens=response_length,
        total_tokens=prompt_length + response_length,
    )

    return ChatCompletionResponse(id=completion_id, model=request.model, choices=choices, usage=usage)


async def create_stream_chat_completion_response(
    request: "ChatCompletionRequest", chat_model: "ChatModel"
) -> AsyncGenerator[str, None]:
    completion_id = "chatcmpl-{}".format(uuid.uuid4().hex)
    input_messages, system, tools, image = _process_request(request)
    if tools:
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Cannot stream function calls.")

    if request.n > 1:
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Cannot stream multiple responses.")

    yield _create_stream_chat_completion_chunk(
        completion_id=completion_id, model=request.model, delta=ChatCompletionMessage(role=Role.ASSISTANT, content="")
    )
    async for new_token in chat_model.astream_chat(
        input_messages,
        system,
        tools,
        image,
        do_sample=request.do_sample,
        temperature=request.temperature,
        top_p=request.top_p,
        max_new_tokens=request.max_tokens,
        stop=request.stop,
    ):
        if len(new_token) != 0:
            yield _create_stream_chat_completion_chunk(
                completion_id=completion_id, model=request.model, delta=ChatCompletionMessage(content=new_token)
            )

    yield _create_stream_chat_completion_chunk(
        completion_id=completion_id, model=request.model, delta=ChatCompletionMessage(), finish_reason=Finish.STOP
    )
    yield "[DONE]"


async def create_score_evaluation_response(
    request: "ScoreEvaluationRequest", chat_model: "ChatModel"
) -> "ScoreEvaluationResponse":
    score_id = "scoreval-{}".format(uuid.uuid4().hex)
    if len(request.messages) == 0:
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid request")

    scores = await chat_model.aget_scores(request.messages, max_length=request.max_length)
    return ScoreEvaluationResponse(id=score_id, model=request.model, scores=scores)
LLaMA-Factory/src/llamafactory/api/common.py  (new file, mode 100644)
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from typing import TYPE_CHECKING, Any, Dict


if TYPE_CHECKING:
    from pydantic import BaseModel


def dictify(data: "BaseModel") -> Dict[str, Any]:
    try:  # pydantic v2
        return data.model_dump(exclude_unset=True)
    except AttributeError:  # pydantic v1
        return data.dict(exclude_unset=True)


def jsonify(data: "BaseModel") -> str:
    try:  # pydantic v2
        return json.dumps(data.model_dump(exclude_unset=True), ensure_ascii=False)
    except AttributeError:  # pydantic v1
        return data.json(exclude_unset=True, ensure_ascii=False)
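
Not part of the commit: a small usage sketch for dictify() and jsonify(). The Greeting model and the import path are illustrative assumptions.

from pydantic import BaseModel

from llamafactory.api.common import dictify, jsonify  # import path assumed from the layout above


class Greeting(BaseModel):  # hypothetical model, for illustration only
    text: str
    lang: str = "en"


msg = Greeting(text="hello")
print(dictify(msg))   # {'text': 'hello'}; unset fields are excluded
print(jsonify(msg))   # {"text": "hello"}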
LLaMA-Factory/src/llamafactory/api/protocol.py  (new file, mode 100644)
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from enum import Enum, unique
from typing import Any, Dict, List, Optional, Union

from pydantic import BaseModel, Field
from typing_extensions import Literal


@unique
class Role(str, Enum):
    USER = "user"
    ASSISTANT = "assistant"
    SYSTEM = "system"
    FUNCTION = "function"
    TOOL = "tool"


@unique
class Finish(str, Enum):
    STOP = "stop"
    LENGTH = "length"
    TOOL = "tool_calls"


class ModelCard(BaseModel):
    id: str
    object: Literal["model"] = "model"
    created: int = Field(default_factory=lambda: int(time.time()))
    owned_by: Literal["owner"] = "owner"


class ModelList(BaseModel):
    object: Literal["list"] = "list"
    data: List[ModelCard] = []


class Function(BaseModel):
    name: str
    arguments: str


class FunctionDefinition(BaseModel):
    name: str
    description: str
    parameters: Dict[str, Any]


class FunctionAvailable(BaseModel):
    type: Literal["function", "code_interpreter"] = "function"
    function: Optional[FunctionDefinition] = None


class FunctionCall(BaseModel):
    id: str
    type: Literal["function"] = "function"
    function: Function


class ImageURL(BaseModel):
    url: str


class MultimodalInputItem(BaseModel):
    type: Literal["text", "image_url"]
    text: Optional[str] = None
    image_url: Optional[ImageURL] = None


class ChatMessage(BaseModel):
    role: Role
    content: Optional[Union[str, List[MultimodalInputItem]]] = None
    tool_calls: Optional[List[FunctionCall]] = None


class ChatCompletionMessage(BaseModel):
    role: Optional[Role] = None
    content: Optional[str] = None
    tool_calls: Optional[List[FunctionCall]] = None


class ChatCompletionRequest(BaseModel):
    model: str
    messages: List[ChatMessage]
    tools: Optional[List[FunctionAvailable]] = None
    do_sample: Optional[bool] = None
    temperature: Optional[float] = None
    top_p: Optional[float] = None
    n: int = 1
    max_tokens: Optional[int] = None
    stop: Optional[Union[str, List[str]]] = None
    stream: bool = False


class ChatCompletionResponseChoice(BaseModel):
    index: int
    message: ChatCompletionMessage
    finish_reason: Finish


class ChatCompletionStreamResponseChoice(BaseModel):
    index: int
    delta: ChatCompletionMessage
    finish_reason: Optional[Finish] = None


class ChatCompletionResponseUsage(BaseModel):
    prompt_tokens: int
    completion_tokens: int
    total_tokens: int


class ChatCompletionResponse(BaseModel):
    id: str
    object: Literal["chat.completion"] = "chat.completion"
    created: int = Field(default_factory=lambda: int(time.time()))
    model: str
    choices: List[ChatCompletionResponseChoice]
    usage: ChatCompletionResponseUsage


class ChatCompletionStreamResponse(BaseModel):
    id: str
    object: Literal["chat.completion.chunk"] = "chat.completion.chunk"
    created: int = Field(default_factory=lambda: int(time.time()))
    model: str
    choices: List[ChatCompletionStreamResponseChoice]


class ScoreEvaluationRequest(BaseModel):
    model: str
    messages: List[str]
    max_length: Optional[int] = None


class ScoreEvaluationResponse(BaseModel):
    id: str
    object: Literal["score.evaluation"] = "score.evaluation"
    model: str
    scores: List[float]
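
Not part of the commit: a sketch showing how an incoming payload maps onto the pydantic schema above. The import path is assumed.

from llamafactory.api.protocol import ChatCompletionRequest, Role  # import path assumed

payload = {
    "model": "qwen2.5",
    "messages": [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "What is 2 + 2?"},
    ],
    "temperature": 0.2,
}
request = ChatCompletionRequest(**payload)  # pydantic validates roles, types and defaults
assert request.messages[0].role == Role.SYSTEM
assert request.n == 1 and request.stream is False  # defaults from the model definition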
LLaMA-Factory/src/llamafactory/chat/__init__.py  (new file, mode 100644)
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .base_engine import BaseEngine
from .chat_model import ChatModel


__all__ = ["BaseEngine", "ChatModel"]
LLaMA-Factory/src/llamafactory/chat/base_engine.py  (new file, mode 100644)
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, AsyncGenerator, Dict, List, Literal, Optional, Sequence, Union


if TYPE_CHECKING:
    from transformers import PreTrainedModel, PreTrainedTokenizer
    from vllm import AsyncLLMEngine

    from ..data import Template
    from ..data.mm_plugin import ImageInput, VideoInput
    from ..hparams import DataArguments, FinetuningArguments, GeneratingArguments, ModelArguments


@dataclass
class Response:
    response_text: str
    response_length: int
    prompt_length: int
    finish_reason: Literal["stop", "length"]


class BaseEngine(ABC):
    r"""
    Base class for inference engine of chat models.

    Must implement the async methods: chat(), stream_chat() and get_scores().
    """

    model: Union["PreTrainedModel", "AsyncLLMEngine"]
    tokenizer: "PreTrainedTokenizer"
    can_generate: bool
    template: "Template"
    generating_args: Dict[str, Any]

    @abstractmethod
    def __init__(
        self,
        model_args: "ModelArguments",
        data_args: "DataArguments",
        finetuning_args: "FinetuningArguments",
        generating_args: "GeneratingArguments",
    ) -> None:
        r"""
        Initializes an inference engine.
        """
        ...

    @abstractmethod
    async def chat(
        self,
        messages: Sequence[Dict[str, str]],
        system: Optional[str] = None,
        tools: Optional[str] = None,
        image: Optional["ImageInput"] = None,
        video: Optional["VideoInput"] = None,
        **input_kwargs,
    ) -> List["Response"]:
        r"""
        Gets a list of responses of the chat model.
        """
        ...

    @abstractmethod
    async def stream_chat(
        self,
        messages: Sequence[Dict[str, str]],
        system: Optional[str] = None,
        tools: Optional[str] = None,
        image: Optional["ImageInput"] = None,
        video: Optional["VideoInput"] = None,
        **input_kwargs,
    ) -> AsyncGenerator[str, None]:
        r"""
        Gets the response token-by-token of the chat model.
        """
        ...

    @abstractmethod
    async def get_scores(
        self,
        batch_input: List[str],
        **input_kwargs,
    ) -> List[float]:
        r"""
        Gets a list of scores of the reward model.
        """
        ...
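
Not part of the commit: a toy sketch of what a concrete BaseEngine subclass has to provide. It simply echoes the last user message and only illustrates the interface; everything except BaseEngine and Response is made up.

from typing import Any, AsyncGenerator, Dict, List

from llamafactory.chat.base_engine import BaseEngine, Response  # import path assumed


class EchoEngine(BaseEngine):
    """Hypothetical engine that echoes the last user message."""

    def __init__(self, model_args, data_args, finetuning_args, generating_args) -> None:
        self.can_generate = True
        self.generating_args: Dict[str, Any] = {}

    async def chat(self, messages, system=None, tools=None, image=None, video=None, **input_kwargs) -> List[Response]:
        text = messages[-1]["content"]
        return [Response(response_text=text, response_length=len(text), prompt_length=len(text), finish_reason="stop")]

    async def stream_chat(self, messages, system=None, tools=None, image=None, video=None, **input_kwargs) -> AsyncGenerator[str, None]:
        for char in messages[-1]["content"]:
            yield char

    async def get_scores(self, batch_input: List[str], **input_kwargs) -> List[float]:
        return [0.0] * len(batch_input)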
LLaMA-Factory/src/llamafactory/chat/chat_model.py  (new file, mode 100644)
# Copyright 2024 THUDM and the LlamaFactory team.
#
# This code is inspired by the THUDM's ChatGLM implementation.
# https://github.com/THUDM/ChatGLM-6B/blob/main/cli_demo.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import os
from threading import Thread
from typing import TYPE_CHECKING, Any, AsyncGenerator, Dict, Generator, List, Optional, Sequence

from ..extras.misc import torch_gc
from ..hparams import get_infer_args
from .hf_engine import HuggingfaceEngine
from .vllm_engine import VllmEngine


if TYPE_CHECKING:
    from ..data.mm_plugin import ImageInput, VideoInput
    from .base_engine import BaseEngine, Response


def _start_background_loop(loop: "asyncio.AbstractEventLoop") -> None:
    asyncio.set_event_loop(loop)
    loop.run_forever()


class ChatModel:
    r"""
    General class for chat models. Backed by huggingface or vllm engines.

    Supports both sync and async methods.
    Sync methods: chat(), stream_chat() and get_scores().
    Async methods: achat(), astream_chat() and aget_scores().
    """

    def __init__(self, args: Optional[Dict[str, Any]] = None) -> None:
        model_args, data_args, finetuning_args, generating_args = get_infer_args(args)
        self.engine_type = model_args.infer_backend
        if model_args.infer_backend == "huggingface":
            self.engine: "BaseEngine" = HuggingfaceEngine(model_args, data_args, finetuning_args, generating_args)
        elif model_args.infer_backend == "vllm":
            self.engine: "BaseEngine" = VllmEngine(model_args, data_args, finetuning_args, generating_args)
        else:
            raise NotImplementedError("Unknown backend: {}".format(model_args.infer_backend))

        self._loop = asyncio.new_event_loop()
        self._thread = Thread(target=_start_background_loop, args=(self._loop,), daemon=True)
        self._thread.start()

    def chat(
        self,
        messages: Sequence[Dict[str, str]],
        system: Optional[str] = None,
        tools: Optional[str] = None,
        image: Optional["ImageInput"] = None,
        video: Optional["VideoInput"] = None,
        **input_kwargs,
    ) -> List["Response"]:
        r"""
        Gets a list of responses of the chat model.
        """
        task = asyncio.run_coroutine_threadsafe(
            self.achat(messages, system, tools, image, video, **input_kwargs), self._loop
        )
        return task.result()

    async def achat(
        self,
        messages: Sequence[Dict[str, str]],
        system: Optional[str] = None,
        tools: Optional[str] = None,
        image: Optional["ImageInput"] = None,
        video: Optional["VideoInput"] = None,
        **input_kwargs,
    ) -> List["Response"]:
        r"""
        Asynchronously gets a list of responses of the chat model.
        """
        return await self.engine.chat(messages, system, tools, image, video, **input_kwargs)

    def stream_chat(
        self,
        messages: Sequence[Dict[str, str]],
        system: Optional[str] = None,
        tools: Optional[str] = None,
        image: Optional["ImageInput"] = None,
        video: Optional["VideoInput"] = None,
        **input_kwargs,
    ) -> Generator[str, None, None]:
        r"""
        Gets the response token-by-token of the chat model.
        """
        generator = self.astream_chat(messages, system, tools, image, video, **input_kwargs)
        while True:
            try:
                task = asyncio.run_coroutine_threadsafe(generator.__anext__(), self._loop)
                yield task.result()
            except StopAsyncIteration:
                break

    async def astream_chat(
        self,
        messages: Sequence[Dict[str, str]],
        system: Optional[str] = None,
        tools: Optional[str] = None,
        image: Optional["ImageInput"] = None,
        video: Optional["VideoInput"] = None,
        **input_kwargs,
    ) -> AsyncGenerator[str, None]:
        r"""
        Asynchronously gets the response token-by-token of the chat model.
        """
        async for new_token in self.engine.stream_chat(messages, system, tools, image, video, **input_kwargs):
            yield new_token

    def get_scores(
        self,
        batch_input: List[str],
        **input_kwargs,
    ) -> List[float]:
        r"""
        Gets a list of scores of the reward model.
        """
        task = asyncio.run_coroutine_threadsafe(self.aget_scores(batch_input, **input_kwargs), self._loop)
        return task.result()

    async def aget_scores(
        self,
        batch_input: List[str],
        **input_kwargs,
    ) -> List[float]:
        r"""
        Asynchronously gets a list of scores of the reward model.
        """
        return await self.engine.get_scores(batch_input, **input_kwargs)


def run_chat() -> None:
    if os.name != "nt":
        try:
            import readline  # noqa: F401
        except ImportError:
            print("Install `readline` for a better experience.")

    chat_model = ChatModel()
    messages = []
    print("Welcome to the CLI application, use `clear` to remove the history, use `exit` to exit the application.")

    while True:
        try:
            query = input("\nUser: ")
        except UnicodeDecodeError:
            print("Detected decoding error at the inputs, please set the terminal encoding to utf-8.")
            continue
        except Exception:
            raise

        if query.strip() == "exit":
            break

        if query.strip() == "clear":
            messages = []
            torch_gc()
            print("History has been removed.")
            continue

        messages.append({"role": "user", "content": query})
        print("Assistant: ", end="", flush=True)

        response = ""
        for new_text in chat_model.stream_chat(messages):
            print(new_text, end="", flush=True)
            response += new_text

        print()
        messages.append({"role": "assistant", "content": response})
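
Not part of the commit: a usage sketch for ChatModel with the huggingface backend. The argument keys follow the usual LLaMA-Factory inference arguments, which are defined in ..hparams and not shown in this commit, so treat the exact key names and the model path as assumptions.

from llamafactory.chat import ChatModel  # exported via chat/__init__.py above

chat_model = ChatModel(args={
    "model_name_or_path": "Qwen/Qwen2.5-7B-Instruct",  # hypothetical model path
    "template": "qwen",
    "infer_backend": "huggingface",
})

messages = [{"role": "user", "content": "Briefly introduce yourself."}]
for new_text in chat_model.stream_chat(messages, temperature=0.7):
    print(new_text, end="", flush=True)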
LLaMA-Factory/src/llamafactory/chat/hf_engine.py  (new file, mode 100644)
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import concurrent.futures
import os
from threading import Thread
from typing import TYPE_CHECKING, Any, AsyncGenerator, Callable, Dict, List, Optional, Sequence, Tuple, Union

import torch
from transformers import GenerationConfig, TextIteratorStreamer
from typing_extensions import override

from ..data import get_template_and_fix_tokenizer
from ..extras.constants import IMAGE_PLACEHOLDER, VIDEO_PLACEHOLDER
from ..extras.logging import get_logger
from ..extras.misc import get_logits_processor
from ..model import load_model, load_tokenizer
from .base_engine import BaseEngine, Response


if TYPE_CHECKING:
    from transformers import PreTrainedModel, PreTrainedTokenizer, ProcessorMixin
    from trl import PreTrainedModelWrapper

    from ..data import Template
    from ..data.mm_plugin import ImageInput, VideoInput
    from ..hparams import DataArguments, FinetuningArguments, GeneratingArguments, ModelArguments


logger = get_logger(__name__)


class HuggingfaceEngine(BaseEngine):
    def __init__(
        self,
        model_args: "ModelArguments",
        data_args: "DataArguments",
        finetuning_args: "FinetuningArguments",
        generating_args: "GeneratingArguments",
    ) -> None:
        self.can_generate = finetuning_args.stage == "sft"
        tokenizer_module = load_tokenizer(model_args)
        self.tokenizer = tokenizer_module["tokenizer"]
        self.processor = tokenizer_module["processor"]
        self.tokenizer.padding_side = "left" if self.can_generate else "right"
        self.template = get_template_and_fix_tokenizer(self.tokenizer, data_args)
        self.model = load_model(
            self.tokenizer, model_args, finetuning_args, is_trainable=False, add_valuehead=(not self.can_generate)
        )  # must after fixing tokenizer to resize vocab
        self.generating_args = generating_args.to_dict()
        try:
            asyncio.get_event_loop()
        except RuntimeError:
            logger.warning("There is no current event loop, creating a new one.")
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)

        self.semaphore = asyncio.Semaphore(int(os.environ.get("MAX_CONCURRENT", "1")))

    @staticmethod
    def _process_args(
        model: "PreTrainedModel",
        tokenizer: "PreTrainedTokenizer",
        processor: Optional["ProcessorMixin"],
        template: "Template",
        generating_args: Dict[str, Any],
        messages: Sequence[Dict[str, str]],
        system: Optional[str] = None,
        tools: Optional[str] = None,
        image: Optional["ImageInput"] = None,
        video: Optional["VideoInput"] = None,
        input_kwargs: Optional[Dict[str, Any]] = {},
    ) -> Tuple[Dict[str, Any], int]:
        mm_input_dict = {"images": [], "videos": [], "imglens": [0], "vidlens": [0]}
        if image is not None:
            mm_input_dict.update({"images": [image], "imglens": [1]})
            if IMAGE_PLACEHOLDER not in messages[0]["content"]:
                messages[0]["content"] = IMAGE_PLACEHOLDER + messages[0]["content"]

        if video is not None:
            mm_input_dict.update({"videos": [video], "vidlens": [1]})
            if VIDEO_PLACEHOLDER not in messages[0]["content"]:
                messages[0]["content"] = VIDEO_PLACEHOLDER + messages[0]["content"]

        messages = template.mm_plugin.process_messages(
            messages, mm_input_dict["images"], mm_input_dict["videos"], processor
        )
        paired_messages = messages + [{"role": "assistant", "content": ""}]
        system = system or generating_args["default_system"]
        prompt_ids, _ = template.encode_oneturn(tokenizer, paired_messages, system, tools)
        prompt_ids, _ = template.mm_plugin.process_token_ids(
            prompt_ids, None, mm_input_dict["images"], mm_input_dict["videos"], tokenizer, processor
        )
        prompt_length = len(prompt_ids)
        inputs = torch.tensor([prompt_ids], device=model.device)
        attention_mask = torch.ones_like(inputs, dtype=torch.bool)

        do_sample: Optional[bool] = input_kwargs.pop("do_sample", None)
        temperature: Optional[float] = input_kwargs.pop("temperature", None)
        top_p: Optional[float] = input_kwargs.pop("top_p", None)
        top_k: Optional[float] = input_kwargs.pop("top_k", None)
        num_return_sequences: int = input_kwargs.pop("num_return_sequences", 1)
        repetition_penalty: Optional[float] = input_kwargs.pop("repetition_penalty", None)
        length_penalty: Optional[float] = input_kwargs.pop("length_penalty", None)
        max_length: Optional[int] = input_kwargs.pop("max_length", None)
        max_new_tokens: Optional[int] = input_kwargs.pop("max_new_tokens", None)
        stop: Optional[Union[str, List[str]]] = input_kwargs.pop("stop", None)

        if stop is not None:
            logger.warning("Stop parameter is not supported by the huggingface engine yet.")

        generating_args = generating_args.copy()
        generating_args.update(
            dict(
                do_sample=do_sample if do_sample is not None else generating_args["do_sample"],
                temperature=temperature if temperature is not None else generating_args["temperature"],
                top_p=top_p if top_p is not None else generating_args["top_p"],
                top_k=top_k if top_k is not None else generating_args["top_k"],
                num_return_sequences=num_return_sequences,
                repetition_penalty=repetition_penalty
                if repetition_penalty is not None
                else generating_args["repetition_penalty"],
                length_penalty=length_penalty if length_penalty is not None else generating_args["length_penalty"],
                eos_token_id=[tokenizer.eos_token_id] + tokenizer.additional_special_tokens_ids,
                pad_token_id=tokenizer.pad_token_id,
            )
        )

        if isinstance(num_return_sequences, int) and num_return_sequences > 1:  # do_sample needs temperature > 0
            generating_args["do_sample"] = True
            generating_args["temperature"] = generating_args["temperature"] or 1.0

        if not generating_args["temperature"]:
            generating_args["do_sample"] = False

        if not generating_args["do_sample"]:
            generating_args.pop("temperature", None)
            generating_args.pop("top_p", None)

        if max_length:
            generating_args.pop("max_new_tokens", None)
            generating_args["max_length"] = max_length

        if max_new_tokens:
            generating_args.pop("max_length", None)
            generating_args["max_new_tokens"] = max_new_tokens

        gen_kwargs = dict(
            inputs=inputs,
            attention_mask=attention_mask,
            generation_config=GenerationConfig(**generating_args),
            logits_processor=get_logits_processor(),
        )

        mm_inputs = template.mm_plugin.get_mm_inputs(**mm_input_dict, seqlens=[prompt_length], processor=processor)
        for key, value in mm_inputs.items():
            value = value if isinstance(value, torch.Tensor) else torch.tensor(value)
            gen_kwargs[key] = value.to(model.device)

        return gen_kwargs, prompt_length

    @staticmethod
    @torch.inference_mode()
    def _chat(
        model: "PreTrainedModel",
        tokenizer: "PreTrainedTokenizer",
        processor: Optional["ProcessorMixin"],
        template: "Template",
        generating_args: Dict[str, Any],
        messages: Sequence[Dict[str, str]],
        system: Optional[str] = None,
        tools: Optional[str] = None,
        image: Optional["ImageInput"] = None,
        video: Optional["VideoInput"] = None,
        input_kwargs: Optional[Dict[str, Any]] = {},
    ) -> List["Response"]:
        gen_kwargs, prompt_length = HuggingfaceEngine._process_args(
            model, tokenizer, processor, template, generating_args, messages, system, tools, image, video, input_kwargs
        )
        generate_output = model.generate(**gen_kwargs)
        response_ids = generate_output[:, prompt_length:]
        response = tokenizer.batch_decode(response_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
        results = []
        for i in range(len(response)):
            eos_index = (response_ids[i] == tokenizer.eos_token_id).nonzero()
            response_length = (eos_index[0].item() + 1) if len(eos_index) else len(response_ids[i])
            results.append(
                Response(
                    response_text=response[i],
                    response_length=response_length,
                    prompt_length=prompt_length,
                    finish_reason="stop" if len(eos_index) else "length",
                )
            )

        return results

    @staticmethod
    @torch.inference_mode()
    def _stream_chat(
        model: "PreTrainedModel",
        tokenizer: "PreTrainedTokenizer",
        processor: Optional["ProcessorMixin"],
        template: "Template",
        generating_args: Dict[str, Any],
        messages: Sequence[Dict[str, str]],
        system: Optional[str] = None,
        tools: Optional[str] = None,
        image: Optional["ImageInput"] = None,
        video: Optional["VideoInput"] = None,
        input_kwargs: Optional[Dict[str, Any]] = {},
    ) -> Callable[[], str]:
        gen_kwargs, _ = HuggingfaceEngine._process_args(
            model, tokenizer, processor, template, generating_args, messages, system, tools, image, video, input_kwargs
        )
        streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
        gen_kwargs["streamer"] = streamer
        thread = Thread(target=model.generate, kwargs=gen_kwargs, daemon=True)
        thread.start()

        def stream():
            try:
                return streamer.__next__()
            except StopIteration:
                raise StopAsyncIteration()

        return stream

    @staticmethod
    @torch.inference_mode()
    def _get_scores(
        model: "PreTrainedModelWrapper",
        tokenizer: "PreTrainedTokenizer",
        batch_input: List[str],
        input_kwargs: Optional[Dict[str, Any]] = {},
    ) -> List[float]:
        max_length: Optional[int] = input_kwargs.pop("max_length", None)
        device = getattr(model.pretrained_model, "device", "cuda")
        inputs: Dict[str, "torch.Tensor"] = tokenizer(
            batch_input,
            padding=True,
            truncation=True,
            max_length=max_length or getattr(model.config, "max_position_embeddings", 1024),
            return_tensors="pt",
            add_special_tokens=False,
        ).to(device)
        values: "torch.Tensor" = model(**inputs, return_dict=True, use_cache=False)[-1]
        scores = values.gather(dim=-1, index=(inputs["attention_mask"].sum(dim=-1, keepdim=True) - 1))
        return scores

    @override
    async def chat(
        self,
        messages: Sequence[Dict[str, str]],
        system: Optional[str] = None,
        tools: Optional[str] = None,
        image: Optional["ImageInput"] = None,
        video: Optional["VideoInput"] = None,
        **input_kwargs,
    ) -> List["Response"]:
        if not self.can_generate:
            raise ValueError("The current model does not support `chat`.")

        loop = asyncio.get_running_loop()
        input_args = (
            self.model,
            self.tokenizer,
            self.processor,
            self.template,
            self.generating_args,
            messages,
            system,
            tools,
            image,
            video,
            input_kwargs,
        )
        async with self.semaphore:
            with concurrent.futures.ThreadPoolExecutor() as pool:
                return await loop.run_in_executor(pool, self._chat, *input_args)

    @override
    async def stream_chat(
        self,
        messages: Sequence[Dict[str, str]],
        system: Optional[str] = None,
        tools: Optional[str] = None,
        image: Optional["ImageInput"] = None,
        video: Optional["VideoInput"] = None,
        **input_kwargs,
    ) -> AsyncGenerator[str, None]:
        if not self.can_generate:
            raise ValueError("The current model does not support `stream_chat`.")

        loop = asyncio.get_running_loop()
        input_args = (
            self.model,
            self.tokenizer,
            self.processor,
            self.template,
            self.generating_args,
            messages,
            system,
            tools,
            image,
            video,
            input_kwargs,
        )
        async with self.semaphore:
            with concurrent.futures.ThreadPoolExecutor() as pool:
                stream = self._stream_chat(*input_args)
                while True:
                    try:
                        yield await loop.run_in_executor(pool, stream)
                    except StopAsyncIteration:
                        break

    @override
    async def get_scores(
        self,
        batch_input: List[str],
        **input_kwargs,
    ) -> List[float]:
        if self.can_generate:
            raise ValueError("Cannot get scores using an auto-regressive model.")

        loop = asyncio.get_running_loop()
        input_args = (self.model, self.tokenizer, batch_input, input_kwargs)
        async with self.semaphore:
            with concurrent.futures.ThreadPoolExecutor() as pool:
                return await loop.run_in_executor(pool, self._get_scores, *input_args)
LLaMA-Factory/src/llamafactory/chat/vllm_engine.py  (new file, mode 100644)
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
from typing import TYPE_CHECKING, Any, AsyncGenerator, AsyncIterator, Dict, List, Optional, Sequence, Union

from typing_extensions import override

from ..data import get_template_and_fix_tokenizer
from ..extras.constants import IMAGE_PLACEHOLDER
from ..extras.logging import get_logger
from ..extras.misc import get_device_count
from ..extras.packages import is_pillow_available, is_vllm_available
from ..model import load_config, load_tokenizer
from ..model.model_utils.quantization import QuantizationMethod
from ..model.model_utils.visual import LlavaMultiModalProjectorForYiVLForVLLM
from .base_engine import BaseEngine, Response


if is_pillow_available():
    from PIL import Image
    from PIL.Image import Image as ImageObject


if is_vllm_available():
    from vllm import AsyncEngineArgs, AsyncLLMEngine, RequestOutput, SamplingParams
    from vllm.lora.request import LoRARequest


if TYPE_CHECKING:
    from ..data.mm_plugin import ImageInput, VideoInput
    from ..hparams import DataArguments, FinetuningArguments, GeneratingArguments, ModelArguments


logger = get_logger(__name__)


class VllmEngine(BaseEngine):
    def __init__(
        self,
        model_args: "ModelArguments",
        data_args: "DataArguments",
        finetuning_args: "FinetuningArguments",
        generating_args: "GeneratingArguments",
    ) -> None:
        config = load_config(model_args)  # may download model from ms hub
        if getattr(config, "quantization_config", None):  # gptq models should use float16
            quantization_config: Dict[str, Any] = getattr(config, "quantization_config", None)
            quant_method = quantization_config.get("quant_method", "")
            if quant_method == QuantizationMethod.GPTQ and model_args.infer_dtype == "auto":
                model_args.infer_dtype = "float16"

        self.can_generate = finetuning_args.stage == "sft"
        tokenizer_module = load_tokenizer(model_args)
        self.tokenizer = tokenizer_module["tokenizer"]
        self.processor = tokenizer_module["processor"]
        self.tokenizer.padding_side = "left"
        self.template = get_template_and_fix_tokenizer(self.tokenizer, data_args)
        self.generating_args = generating_args.to_dict()

        engine_args = {
            "model": model_args.model_name_or_path,
            "trust_remote_code": True,
            "download_dir": model_args.cache_dir,
            "dtype": model_args.infer_dtype,
            "max_model_len": model_args.vllm_maxlen,
            "tensor_parallel_size": get_device_count() or 1,
            "gpu_memory_utilization": model_args.vllm_gpu_util,
            "disable_log_stats": True,
            "disable_log_requests": True,
            "enforce_eager": model_args.vllm_enforce_eager,
            "enable_lora": model_args.adapter_name_or_path is not None,
            "max_lora_rank": model_args.vllm_max_lora_rank,
        }

        if getattr(config, "is_yi_vl_derived_model", None):
            import vllm.model_executor.models.llava

            logger.info("Detected Yi-VL model, applying projector patch.")
            vllm.model_executor.models.llava.LlavaMultiModalProjector = LlavaMultiModalProjectorForYiVLForVLLM

        self.model = AsyncLLMEngine.from_engine_args(AsyncEngineArgs(**engine_args))
        if model_args.adapter_name_or_path is not None:
            self.lora_request = LoRARequest("default", 1, model_args.adapter_name_or_path[0])
        else:
            self.lora_request = None

    async def _generate(
        self,
        messages: Sequence[Dict[str, str]],
        system: Optional[str] = None,
        tools: Optional[str] = None,
        image: Optional["ImageInput"] = None,
        video: Optional["VideoInput"] = None,
        **input_kwargs,
    ) -> AsyncIterator["RequestOutput"]:
        request_id = "chatcmpl-{}".format(uuid.uuid4().hex)
        if image is not None:
            if IMAGE_PLACEHOLDER not in messages[0]["content"]:
                messages[0]["content"] = IMAGE_PLACEHOLDER + messages[0]["content"]

        paired_messages = messages + [{"role": "assistant", "content": ""}]
        system = system or self.generating_args["default_system"]
        prompt_ids, _ = self.template.encode_oneturn(self.tokenizer, paired_messages, system, tools)
        prompt_length = len(prompt_ids)

        use_beam_search: bool = self.generating_args["num_beams"] > 1
        temperature: Optional[float] = input_kwargs.pop("temperature", None)
        top_p: Optional[float] = input_kwargs.pop("top_p", None)
        top_k: Optional[float] = input_kwargs.pop("top_k", None)
        num_return_sequences: int = input_kwargs.pop("num_return_sequences", 1)
        repetition_penalty: Optional[float] = input_kwargs.pop("repetition_penalty", None)
        length_penalty: Optional[float] = input_kwargs.pop("length_penalty", None)
        max_length: Optional[int] = input_kwargs.pop("max_length", None)
        max_new_tokens: Optional[int] = input_kwargs.pop("max_new_tokens", None)
        stop: Optional[Union[str, List[str]]] = input_kwargs.pop("stop", None)

        if "max_new_tokens" in self.generating_args:
            max_tokens = self.generating_args["max_new_tokens"]
        elif "max_length" in self.generating_args:
            if self.generating_args["max_length"] > prompt_length:
                max_tokens = self.generating_args["max_length"] - prompt_length
            else:
                max_tokens = 1

        if max_length:
            max_tokens = max_length - prompt_length if max_length > prompt_length else 1

        if max_new_tokens:
            max_tokens = max_new_tokens

        sampling_params = SamplingParams(
            n=num_return_sequences,
            repetition_penalty=(
                repetition_penalty if repetition_penalty is not None else self.generating_args["repetition_penalty"]
            )
            or 1.0,  # repetition_penalty must > 0
            temperature=temperature if temperature is not None else self.generating_args["temperature"],
            top_p=(top_p if top_p is not None else self.generating_args["top_p"]) or 1.0,  # top_p must > 0
            top_k=top_k if top_k is not None else self.generating_args["top_k"],
            use_beam_search=use_beam_search,
            length_penalty=length_penalty if length_penalty is not None else self.generating_args["length_penalty"],
            stop=stop,
            stop_token_ids=[self.tokenizer.eos_token_id] + self.tokenizer.additional_special_tokens_ids,
            max_tokens=max_tokens,
            skip_special_tokens=True,
        )

        if image is not None:  # add image features
            if not isinstance(image, (str, ImageObject)):
                raise ValueError("Expected image input is a path or PIL.Image, but got {}.".format(type(image)))

            if isinstance(image, str):
                image = Image.open(image).convert("RGB")

            multi_modal_data = {"image": image}
        else:
            multi_modal_data = None

        result_generator = self.model.generate(
            inputs={"prompt_token_ids": prompt_ids, "multi_modal_data": multi_modal_data},
            sampling_params=sampling_params,
            request_id=request_id,
            lora_request=self.lora_request,
        )
        return result_generator

    @override
    async def chat(
        self,
        messages: Sequence[Dict[str, str]],
        system: Optional[str] = None,
        tools: Optional[str] = None,
        image: Optional["ImageInput"] = None,
        video: Optional["VideoInput"] = None,
        **input_kwargs,
    ) -> List["Response"]:
        final_output = None
        generator = await self._generate(messages, system, tools, image, video, **input_kwargs)
        async for request_output in generator:
            final_output = request_output

        results = []
        for output in final_output.outputs:
            results.append(
                Response(
                    response_text=output.text,
                    response_length=len(output.token_ids),
                    prompt_length=len(final_output.prompt_token_ids),
                    finish_reason=output.finish_reason,
                )
            )

        return results

    @override
    async def stream_chat(
        self,
        messages: Sequence[Dict[str, str]],
        system: Optional[str] = None,
        tools: Optional[str] = None,
        image: Optional["ImageInput"] = None,
        video: Optional["VideoInput"] = None,
        **input_kwargs,
    ) -> AsyncGenerator[str, None]:
        generated_text = ""
        generator = await self._generate(messages, system, tools, image, video, **input_kwargs)
        async for result in generator:
            delta_text = result.outputs[0].text[len(generated_text):]
            generated_text = result.outputs[0].text
            yield delta_text

    @override
    async def get_scores(
        self,
        batch_input: List[str],
        **input_kwargs,
    ) -> List[float]:
        raise NotImplementedError("vLLM engine does not support get_scores.")
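
Not part of the commit: a sketch of routing ChatModel to the vLLM engine above. The backend is selected through the infer_backend argument read by get_infer_args(); the other keys mirror common LLaMA-Factory inference arguments defined in ..hparams and are assumptions here.

from llamafactory.chat import ChatModel

chat_model = ChatModel(args={
    "model_name_or_path": "Qwen/Qwen2.5-7B-Instruct",  # hypothetical model path
    "template": "qwen",
    "infer_backend": "vllm",  # routes ChatModel to VllmEngine
    "vllm_maxlen": 4096,      # becomes AsyncEngineArgs(max_model_len=...)
})

print(chat_model.chat([{"role": "user", "content": "Hi"}])[0].response_text)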
LLaMA-Factory/src/llamafactory/cli.py  (new file, mode 100644)
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import subprocess
import sys
from enum import Enum, unique

from . import launcher
from .api.app import run_api
from .chat.chat_model import run_chat
from .eval.evaluator import run_eval
from .extras.env import VERSION, print_env
from .extras.logging import get_logger
from .extras.misc import get_device_count
from .train.tuner import export_model, run_exp
from .webui.interface import run_web_demo, run_web_ui


USAGE = (
    "-" * 70
    + "\n"
    + "| Usage:                                                             |\n"
    + "|   llamafactory-cli api -h: launch an OpenAI-style API server       |\n"
    + "|   llamafactory-cli chat -h: launch a chat interface in CLI         |\n"
    + "|   llamafactory-cli eval -h: evaluate models                        |\n"
    + "|   llamafactory-cli export -h: merge LoRA adapters and export model |\n"
    + "|   llamafactory-cli train -h: train models                          |\n"
    + "|   llamafactory-cli webchat -h: launch a chat interface in Web UI   |\n"
    + "|   llamafactory-cli webui: launch LlamaBoard                        |\n"
    + "|   llamafactory-cli version: show version info                      |\n"
    + "-" * 70
)

WELCOME = (
    "-" * 58
    + "\n"
    + "| Welcome to LLaMA Factory, version {}".format(VERSION)
    + " " * (21 - len(VERSION))
    + "|\n|"
    + " " * 56
    + "|\n"
    + "| Project page: https://github.com/hiyouga/LLaMA-Factory |\n"
    + "-" * 58
)

logger = get_logger(__name__)


@unique
class Command(str, Enum):
    API = "api"
    CHAT = "chat"
    ENV = "env"
    EVAL = "eval"
    EXPORT = "export"
    TRAIN = "train"
    WEBDEMO = "webchat"
    WEBUI = "webui"
    VER = "version"
    HELP = "help"


def main():
    command = sys.argv.pop(1) if len(sys.argv) != 1 else Command.HELP
    if command == Command.API:
        run_api()
    elif command == Command.CHAT:
        run_chat()
    elif command == Command.ENV:
        print_env()
    elif command == Command.EVAL:
        run_eval()
    elif command == Command.EXPORT:
        export_model()
    elif command == Command.TRAIN:
        force_torchrun = os.environ.get("FORCE_TORCHRUN", "0").lower() in ["true", "1"]
        if force_torchrun or get_device_count() > 1:
            master_addr = os.environ.get("MASTER_ADDR", "127.0.0.1")
            master_port = os.environ.get("MASTER_PORT", str(random.randint(20001, 29999)))
            logger.info("Initializing distributed tasks at: {}:{}".format(master_addr, master_port))
            process = subprocess.run(
                (
                    "torchrun --nnodes {nnodes} --node_rank {node_rank} --nproc_per_node {nproc_per_node} "
                    "--master_addr {master_addr} --master_port {master_port} {file_name} {args}"
                ).format(
                    nnodes=os.environ.get("NNODES", "1"),
                    node_rank=os.environ.get("RANK", "0"),
                    nproc_per_node=os.environ.get("NPROC_PER_NODE", str(get_device_count())),
                    master_addr=master_addr,
                    master_port=master_port,
                    file_name=launcher.__file__,
                    args=" ".join(sys.argv[1:]),
                ),
                shell=True,
            )
            sys.exit(process.returncode)
        else:
            run_exp()
    elif command == Command.WEBDEMO:
        run_web_demo()
    elif command == Command.WEBUI:
        run_web_ui()
    elif command == Command.VER:
        print(WELCOME)
    elif command == Command.HELP:
        print(USAGE)
    else:
        raise NotImplementedError("Unknown command: {}.".format(command))
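As a quick illustration of the dispatch above, a minimal sketch (assuming the package is importable as `llamafactory`): the same `main()` entry point can be driven programmatically by populating `sys.argv` before calling it.

import sys

from llamafactory.cli import main

# Equivalent to running `llamafactory-cli version` from the shell:
# main() pops sys.argv[1] and matches it against the Command enum.
sys.argv = ["llamafactory-cli", "version"]
main()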
LLaMA-Factory/src/llamafactory/data/__init__.py
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .collator import (
    KTODataCollatorWithPadding,
    MultiModalDataCollatorForSeq2Seq,
    PairwiseDataCollatorWithPadding,
    SFTDataCollatorWith4DAttentionMask,
)
from .data_utils import Role, split_dataset
from .loader import get_dataset
from .template import TEMPLATES, Template, get_template_and_fix_tokenizer


__all__ = [
    "KTODataCollatorWithPadding",
    "MultiModalDataCollatorForSeq2Seq",
    "PairwiseDataCollatorWithPadding",
    "SFTDataCollatorWith4DAttentionMask",
    "Role",
    "split_dataset",
    "get_dataset",
    "TEMPLATES",
    "Template",
    "get_template_and_fix_tokenizer",
]
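For orientation, a minimal sketch of how downstream code is expected to consume this re-exported surface (the import path assumes the repository's src layout is on PYTHONPATH):

from llamafactory.data import KTODataCollatorWithPadding, Role, get_dataset

# The Role enum is a plain str Enum, so its values can be compared to strings.
print(Role.ASSISTANT.value)  # "assistant"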
LLaMA-Factory/src/llamafactory/data/aligner.py
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from functools import partial
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union

from ..extras.logging import get_logger
from .data_utils import Role


if TYPE_CHECKING:
    from datasets import Dataset, IterableDataset
    from transformers import Seq2SeqTrainingArguments

    from ..hparams import DataArguments
    from .mm_plugin import ImageInput, VideoInput
    from .parser import DatasetAttr


logger = get_logger(__name__)


def _convert_images(
    images: Sequence["ImageInput"],
    dataset_attr: "DatasetAttr",
    data_args: "DataArguments",
) -> Optional[List["ImageInput"]]:
    r"""
    Optionally concatenates image path to dataset dir when loading from local disk.
    """
    if len(images) == 0:
        return None

    images = images[:]
    if dataset_attr.load_from in ["script", "file"]:
        for i in range(len(images)):
            if isinstance(images[i], str) and os.path.isfile(os.path.join(data_args.dataset_dir, images[i])):
                images[i] = os.path.join(data_args.dataset_dir, images[i])

    return images


def _convert_videos(
    videos: Sequence["VideoInput"],
    dataset_attr: "DatasetAttr",
    data_args: "DataArguments",
) -> Optional[List["VideoInput"]]:
    r"""
    Optionally concatenates video path to dataset dir when loading from local disk.
    """
    if len(videos) == 0:
        return None

    videos = videos[:]
    if dataset_attr.load_from in ["script", "file"]:
        for i in range(len(videos)):
            if isinstance(videos[i], str) and os.path.isfile(os.path.join(data_args.dataset_dir, videos[i])):
                videos[i] = os.path.join(data_args.dataset_dir, videos[i])

    return videos


def convert_alpaca(
    example: Dict[str, Any],
    dataset_attr: "DatasetAttr",
    data_args: "DataArguments",
) -> Dict[str, Any]:
    r"""
    Converts alpaca format dataset to the standard format.
    """
    prompt = []
    if dataset_attr.history and isinstance(example[dataset_attr.history], list):
        for old_prompt, old_response in example[dataset_attr.history]:
            prompt.append({"role": Role.USER.value, "content": old_prompt})
            prompt.append({"role": Role.ASSISTANT.value, "content": old_response})

    query = []
    if dataset_attr.prompt and example[dataset_attr.prompt]:
        query.append(example[dataset_attr.prompt])

    if dataset_attr.query and example[dataset_attr.query]:
        query.append(example[dataset_attr.query])

    prompt.append({"role": Role.USER.value, "content": "\n".join(query)})  # "prompt\nquery"

    if dataset_attr.kto_tag and isinstance(example[dataset_attr.kto_tag], bool):  # kto example
        response = [{"role": Role.ASSISTANT.value, "content": example[dataset_attr.response]}]
        if example[dataset_attr.kto_tag]:
            response = response + [{"role": Role.ASSISTANT.value, "content": ""}]
        else:
            response = [{"role": Role.ASSISTANT.value, "content": ""}] + response
    elif (
        dataset_attr.ranking
        and isinstance(example[dataset_attr.chosen], str)
        and isinstance(example[dataset_attr.rejected], str)
    ):  # pairwise example
        response = [
            {"role": Role.ASSISTANT.value, "content": example[dataset_attr.chosen]},
            {"role": Role.ASSISTANT.value, "content": example[dataset_attr.rejected]},
        ]
    elif dataset_attr.response and isinstance(example[dataset_attr.response], str):  # normal example
        response = [{"role": Role.ASSISTANT.value, "content": example[dataset_attr.response]}]
    else:  # unsupervised
        response = []

    convert_images = partial(_convert_images, dataset_attr=dataset_attr, data_args=data_args)
    convert_videos = partial(_convert_videos, dataset_attr=dataset_attr, data_args=data_args)
    output = {
        "_prompt": prompt,
        "_response": response,
        "_system": example[dataset_attr.system] if dataset_attr.system else "",
        "_tools": example[dataset_attr.tools] if dataset_attr.tools else "",
        "_images": convert_images(example[dataset_attr.images]) if dataset_attr.images else None,
        "_videos": convert_videos(example[dataset_attr.videos]) if dataset_attr.videos else None,
    }
    return output


def convert_sharegpt(
    example: Dict[str, Any],
    dataset_attr: "DatasetAttr",
    data_args: "DataArguments",
) -> Dict[str, Any]:
    r"""
    Converts sharegpt format dataset to the standard format.
    """
    tag_mapping = {
        dataset_attr.user_tag: Role.USER.value,
        dataset_attr.assistant_tag: Role.ASSISTANT.value,
        dataset_attr.observation_tag: Role.OBSERVATION.value,
        dataset_attr.function_tag: Role.FUNCTION.value,
        dataset_attr.system_tag: Role.SYSTEM.value,
    }
    odd_tags = (dataset_attr.user_tag, dataset_attr.observation_tag)
    even_tags = (dataset_attr.assistant_tag, dataset_attr.function_tag)
    accept_tags = (odd_tags, even_tags)
    messages = example[dataset_attr.messages]
    if (
        dataset_attr.system_tag
        and len(messages) != 0
        and messages[0][dataset_attr.role_tag] == dataset_attr.system_tag
    ):
        system = messages[0][dataset_attr.content_tag]
        messages = messages[1:]
    else:
        system = example[dataset_attr.system] if dataset_attr.system else ""

    aligned_messages = []
    broken_data = False
    for turn_idx, message in enumerate(messages):
        if message[dataset_attr.role_tag] not in accept_tags[turn_idx % 2]:
            logger.warning("Invalid role tag in {}.".format(messages))
            broken_data = True

        aligned_messages.append(
            {"role": tag_mapping[message[dataset_attr.role_tag]], "content": message[dataset_attr.content_tag]}
        )

    if (not dataset_attr.ranking and len(aligned_messages) % 2 != 0) or (
        dataset_attr.ranking and len(aligned_messages) % 2 == 0
    ):
        logger.warning("Invalid message count in {}.".format(messages))
        broken_data = True

    if dataset_attr.kto_tag and isinstance(example[dataset_attr.kto_tag], bool):  # kto example
        prompt = aligned_messages[:-1]
        response = aligned_messages[-1:]
        if example[dataset_attr.kto_tag]:
            response = response + [{"role": Role.ASSISTANT.value, "content": ""}]
        else:
            response = [{"role": Role.ASSISTANT.value, "content": ""}] + response
    elif (
        dataset_attr.ranking
        and isinstance(example[dataset_attr.chosen], dict)
        and isinstance(example[dataset_attr.rejected], dict)
    ):  # pairwise example
        chosen = example[dataset_attr.chosen]
        rejected = example[dataset_attr.rejected]
        if (
            chosen[dataset_attr.role_tag] not in accept_tags[-1]
            or rejected[dataset_attr.role_tag] not in accept_tags[-1]
        ):
            logger.warning("Invalid role tag in {}.".format([chosen, rejected]))
            broken_data = True

        prompt = aligned_messages
        response = [
            {"role": tag_mapping[chosen[dataset_attr.role_tag]], "content": chosen[dataset_attr.content_tag]},
            {"role": tag_mapping[rejected[dataset_attr.role_tag]], "content": rejected[dataset_attr.content_tag]},
        ]
    else:  # normal example
        prompt = aligned_messages[:-1]
        response = aligned_messages[-1:]

    if broken_data:
        logger.warning("Skipping this abnormal example.")
        prompt, response = [], []

    convert_images = partial(_convert_images, dataset_attr=dataset_attr, data_args=data_args)
    convert_videos = partial(_convert_videos, dataset_attr=dataset_attr, data_args=data_args)
    output = {
        "_prompt": prompt,
        "_response": response,
        "_system": system,
        "_tools": example[dataset_attr.tools] if dataset_attr.tools else "",
        "_images": convert_images(example[dataset_attr.images]) if dataset_attr.images else None,
        "_videos": convert_videos(example[dataset_attr.videos]) if dataset_attr.videos else None,
    }
    return output


def align_dataset(
    dataset: Union["Dataset", "IterableDataset"],
    dataset_attr: "DatasetAttr",
    data_args: "DataArguments",
    training_args: "Seq2SeqTrainingArguments",
) -> Union["Dataset", "IterableDataset"]:
    r"""
    Aligned dataset:
        _prompt: [{"role": "user", "content": "..."}] * (2T - 1)
        _response: [{"role": "assistant", "content": "..."}] * N (N > 1 for ranking dataset)
        _system: "..."
        _tools: "...",
        _images: [],
        _videos: [],
    """
    if dataset_attr.formatting == "alpaca":
        convert_func = partial(convert_alpaca, dataset_attr=dataset_attr, data_args=data_args)
    else:
        convert_func = partial(convert_sharegpt, dataset_attr=dataset_attr, data_args=data_args)

    column_names = list(next(iter(dataset)).keys())
    kwargs = {}
    if not data_args.streaming:
        kwargs = dict(
            num_proc=data_args.preprocessing_num_workers,
            load_from_cache_file=(not data_args.overwrite_cache) or (training_args.local_process_index != 0),
            desc="Converting format of dataset",
        )

    return dataset.map(
        convert_func,
        batched=False,
        remove_columns=column_names,
        **kwargs,
    )
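To make the alpaca conversion concrete, a small sketch that runs convert_alpaca on one hand-written record; the SimpleNamespace objects below are simplified stand-ins for the real DatasetAttr/DataArguments dataclasses, reduced to the fields this code path actually reads.

from types import SimpleNamespace

from llamafactory.data.aligner import convert_alpaca

# Stand-ins: only the attributes convert_alpaca touches for a plain SFT example.
dataset_attr = SimpleNamespace(
    prompt="instruction", query="input", response="output",
    history=None, kto_tag=None, ranking=False,
    system=None, tools=None, images=None, videos=None,
    chosen=None, rejected=None,
)
data_args = SimpleNamespace(dataset_dir="data")

example = {"instruction": "Translate to French", "input": "Good morning", "output": "Bonjour"}
print(convert_alpaca(example, dataset_attr, data_args))
# _prompt becomes one user turn "Translate to French\nGood morning",
# _response one assistant turn "Bonjour"; _system/_tools are empty strings.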
LLaMA-Factory/src/llamafactory/data/collator.py
# Copyright 2024 OpenAccess AI Collective and the LlamaFactory team.
#
# This code is inspired by the OpenAccess AI Collective's axolotl library.
# https://github.com/OpenAccess-AI-Collective/axolotl/blob/main/src/axolotl/monkeypatch/utils.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Dict, Literal, Optional, Sequence

import torch
from transformers import DataCollatorForSeq2Seq


if TYPE_CHECKING:
    from transformers import ProcessorMixin

    from .template import Template


def prepare_4d_attention_mask(attention_mask_with_indices: "torch.Tensor", dtype: "torch.dtype") -> "torch.Tensor":
    r"""
    Expands the attention mask with indices from (batch_size, seq_len) to (batch_size, 1, seq_len, seq_len),
    while handles packed sequences and transforms the mask to lower triangular form to prevent future peeking.

    e.g.
    ```python
    # input
    [[1, 1, 2, 2, 2, 0]]
    # output
    [
        [
            [
                [o, x, x, x, x, x],
                [o, o, x, x, x, x],
                [x, x, o, x, x, x],
                [x, x, o, o, x, x],
                [x, x, o, o, o, x],
                [x, x, x, x, x, x],
            ]
        ]
    ]
    ```
    where `o` equals to `0.0`, `x` equals to `min_dtype`.
    """
    bsz, seq_len = attention_mask_with_indices.size()
    min_dtype = torch.finfo(dtype).min
    expanded_mask = attention_mask_with_indices[:, None, None, :].expand(bsz, 1, seq_len, seq_len)
    # Create a binary mask from the original mask where zeros remain zeros and all other values are set to one
    padding_mask = torch.where(expanded_mask != 0, 1, 0)
    # Create a block-diagonal mask.
    attention_mask_4d = torch.eq(expanded_mask, expanded_mask.transpose(-1, -2)).int() * padding_mask
    # Use the lower triangular mask to zero out the upper triangular part
    attention_mask_4d *= torch.tril(torch.ones((seq_len, seq_len), dtype=torch.long))
    # Invert the attention mask.
    attention_mask_4d = torch.where(attention_mask_4d != 0, torch.tensor(0, dtype=dtype), min_dtype)
    return attention_mask_4d


@dataclass
class MultiModalDataCollatorForSeq2Seq(DataCollatorForSeq2Seq):
    r"""
    Data collator that supports VLMs.

    Features should contain input_ids, attention_mask, labels and images.
    """

    template: Optional["Template"] = None
    processor: Optional["ProcessorMixin"] = None

    def __call__(self, features: Sequence[Dict[str, Any]]) -> Dict[str, "torch.Tensor"]:
        batch_images, batch_videos, batch_imglens, batch_vidlens, batch_seqlens = [], [], [], [], []
        for feature in features:
            images = feature.pop("images", None) or []
            videos = feature.pop("videos", None) or []
            batch_images.extend(images)
            batch_videos.extend(videos)
            batch_imglens.append(len(images))
            batch_vidlens.append(len(videos))
            batch_seqlens.append(len(feature["input_ids"]))

        mm_inputs = self.template.mm_plugin.get_mm_inputs(
            batch_images, batch_videos, batch_imglens, batch_vidlens, batch_seqlens, self.processor
        )
        if "token_type_ids" in mm_inputs:
            token_type_ids = mm_inputs.pop("token_type_ids")
            for i, feature in enumerate(features):
                feature["token_type_ids"] = token_type_ids[i]

        features: Dict[str, "torch.Tensor"] = super().__call__(features)
        features.update(mm_inputs)
        return features


@dataclass
class SFTDataCollatorWith4DAttentionMask(MultiModalDataCollatorForSeq2Seq):
    r"""
    Data collator for 4d attention mask.
    """

    block_diag_attn: bool = False
    attn_implementation: Literal["eager", "sdpa", "flash_attention_2"] = "eager"
    compute_dtype: "torch.dtype" = torch.float32

    def __call__(self, features: Sequence[Dict[str, Any]]) -> Dict[str, "torch.Tensor"]:
        features = super().__call__(features)
        if self.block_diag_attn and self.attn_implementation != "flash_attention_2":
            features["attention_mask"] = prepare_4d_attention_mask(features["attention_mask"], self.compute_dtype)

        return features


@dataclass
class PairwiseDataCollatorWithPadding(MultiModalDataCollatorForSeq2Seq):
    r"""
    Data collator for pairwise data.
    """

    def __call__(self, features: Sequence[Dict[str, Any]]) -> Dict[str, "torch.Tensor"]:
        r"""
        Pads batched data to the longest sequence in the batch.

        We generate 2 * n examples where the first n examples represent chosen examples and
        the last n examples represent rejected examples.
        """
        concatenated_features = []
        for key in ("chosen", "rejected"):
            for feature in features:
                target_feature = {
                    "input_ids": feature["{}_input_ids".format(key)],
                    "attention_mask": feature["{}_attention_mask".format(key)],
                    "labels": feature["{}_labels".format(key)],
                    "images": feature["images"],
                    "videos": feature["videos"],
                }
                concatenated_features.append(target_feature)

        return super().__call__(concatenated_features)


@dataclass
class KTODataCollatorWithPadding(MultiModalDataCollatorForSeq2Seq):
    r"""
    Data collator for KTO data.
    """

    def __call__(self, features: Sequence[Dict[str, Any]]) -> Dict[str, "torch.Tensor"]:
        target_features = []
        kl_features = []
        kto_tags = []
        for feature in features:
            target_feature = {
                "input_ids": feature["input_ids"],
                "attention_mask": feature["attention_mask"],
                "labels": feature["labels"],
                "images": feature["images"],
                "videos": feature["videos"],
            }
            kl_feature = {
                "input_ids": feature["kl_input_ids"],
                "attention_mask": feature["kl_attention_mask"],
                "labels": feature["kl_labels"],
                "images": feature["images"],
                "videos": feature["videos"],
            }
            target_features.append(target_feature)
            kl_features.append(kl_feature)
            kto_tags.append(feature["kto_tags"])

        batch = super().__call__(target_features)
        kl_batch = super().__call__(kl_features)
        batch["kl_input_ids"] = kl_batch["input_ids"]
        batch["kl_attention_mask"] = kl_batch["attention_mask"]
        batch["kl_labels"] = kl_batch["labels"]
        if "token_type_ids" in kl_batch:
            batch["kl_token_type_ids"] = kl_batch["token_type_ids"]

        batch["kto_tags"] = torch.tensor(kto_tags)
        return batch
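A quick way to sanity-check the mask construction above, as a standalone sketch that assumes only torch is installed, is to feed it the packed-sequence example from the docstring and inspect which positions are attendable (exactly 0.0) versus blocked (min_dtype).

import torch

from llamafactory.data.collator import prepare_4d_attention_mask

# One packed row: two sequences (segment indices 1 and 2) followed by one padding slot (0).
mask_with_indices = torch.tensor([[1, 1, 2, 2, 2, 0]])
mask_4d = prepare_4d_attention_mask(mask_with_indices, dtype=torch.float32)

# 1 marks attendable positions; the pattern matches the o/x diagram in the docstring:
# a lower-triangular block per segment, and the padding row/column fully blocked.
print((mask_4d == 0.0).int()[0, 0])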
LLaMA-Factory/src/llamafactory/data/data_utils.py
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum, unique
from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, Set, TypedDict, Union

from datasets import DatasetDict, concatenate_datasets, interleave_datasets

from ..extras.logging import get_logger


if TYPE_CHECKING:
    from datasets import Dataset, IterableDataset

    from ..hparams import DataArguments


logger = get_logger(__name__)


SLOTS = Sequence[Union[str, Set[str], Dict[str, str]]]


@unique
class Role(str, Enum):
    USER = "user"
    ASSISTANT = "assistant"
    SYSTEM = "system"
    FUNCTION = "function"
    OBSERVATION = "observation"


class DatasetModule(TypedDict):
    train_dataset: Optional[Union["Dataset", "IterableDataset"]]
    eval_dataset: Optional[Union["Dataset", "IterableDataset"]]


def merge_dataset(
    all_datasets: List[Union["Dataset", "IterableDataset"]], data_args: "DataArguments", seed: int
) -> Union["Dataset", "IterableDataset"]:
    r"""
    Merges multiple datasets to a unified dataset.
    """
    if len(all_datasets) == 1:
        return all_datasets[0]
    elif data_args.mix_strategy == "concat":
        if data_args.streaming:
            logger.warning("The samples between different datasets will not be mixed in streaming mode.")

        return concatenate_datasets(all_datasets)
    elif data_args.mix_strategy.startswith("interleave"):
        if not data_args.streaming:
            logger.warning("We recommend using `mix_strategy=concat` in non-streaming mode.")

        return interleave_datasets(
            datasets=all_datasets,
            probabilities=data_args.interleave_probs,
            seed=seed,
            stopping_strategy="first_exhausted" if data_args.mix_strategy.endswith("under") else "all_exhausted",
        )
    else:
        raise ValueError("Unknown mixing strategy: {}.".format(data_args.mix_strategy))


def split_dataset(
    dataset: Union["Dataset", "IterableDataset"], data_args: "DataArguments", seed: int
) -> "DatasetDict":
    r"""
    Splits the dataset and returns a dataset dict containing train set and validation set.

    Supports both map dataset and iterable dataset.
    """
    if data_args.streaming:
        dataset = dataset.shuffle(buffer_size=data_args.buffer_size, seed=seed)
        val_set = dataset.take(int(data_args.val_size))
        train_set = dataset.skip(int(data_args.val_size))
        return DatasetDict({"train": train_set, "validation": val_set})
    else:
        val_size = int(data_args.val_size) if data_args.val_size > 1 else data_args.val_size
        dataset = dataset.train_test_split(test_size=val_size, seed=seed)
        return DatasetDict({"train": dataset["train"], "validation": dataset["test"]})
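For reference, a minimal sketch of split_dataset on an in-memory datasets.Dataset; the SimpleNamespace below is a stand-in for DataArguments carrying only the fields used by the non-streaming branch.

from types import SimpleNamespace

from datasets import Dataset

from llamafactory.data.data_utils import split_dataset

toy = Dataset.from_dict({"text": [f"sample {i}" for i in range(10)]})
data_args = SimpleNamespace(streaming=False, val_size=0.2, buffer_size=1024)

# val_size <= 1 is treated as a fraction, so 10 samples split into 8 train / 2 validation.
dataset_dict = split_dataset(toy, data_args, seed=42)
print(len(dataset_dict["train"]), len(dataset_dict["validation"]))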
LLaMA-Factory/src/llamafactory/data/formatter.py
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import re
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, List, Optional, Tuple, Union

from typing_extensions import override

from .data_utils import SLOTS
from .tool_utils import get_tool_utils


if TYPE_CHECKING:
    from .tool_utils import FunctionCall


@dataclass
class Formatter(ABC):
    slots: SLOTS = field(default_factory=list)
    tool_format: Optional[str] = None

    @abstractmethod
    def apply(self, **kwargs) -> SLOTS:
        r"""
        Forms a list of slots according to the inputs to encode.
        """
        ...

    def extract(self, content: str) -> Union[str, List["FunctionCall"]]:
        r"""
        Extract a list of tuples from the response message if using tools.

        Each tuple consists of function name and function arguments.
        """
        raise NotImplementedError


@dataclass
class EmptyFormatter(Formatter):
    def __post_init__(self):
        has_placeholder = False
        for slot in filter(lambda s: isinstance(s, str), self.slots):
            if re.search(r"\{\{[a-zA-Z_][a-zA-Z0-9_]*\}\}", slot):
                has_placeholder = True

        if has_placeholder:
            raise ValueError("Empty formatter should not contain any placeholder.")

    @override
    def apply(self, **kwargs) -> SLOTS:
        return self.slots


@dataclass
class StringFormatter(Formatter):
    def __post_init__(self):
        has_placeholder = False
        for slot in filter(lambda s: isinstance(s, str), self.slots):
            if re.search(r"\{\{[a-zA-Z_][a-zA-Z0-9_]*\}\}", slot):
                has_placeholder = True

        if not has_placeholder:
            raise ValueError("A placeholder is required in the string formatter.")

    @override
    def apply(self, **kwargs) -> SLOTS:
        elements = []
        for slot in self.slots:
            if isinstance(slot, str):
                for name, value in kwargs.items():
                    if not isinstance(value, str):
                        raise RuntimeError("Expected a string, got {}".format(value))

                    slot = slot.replace("{{" + name + "}}", value, 1)

                elements.append(slot)
            elif isinstance(slot, (dict, set)):
                elements.append(slot)
            else:
                raise RuntimeError("Input must be string, set[str] or dict[str, str], got {}".format(type(slot)))

        return elements


@dataclass
class FunctionFormatter(Formatter):
    def __post_init__(self):
        self.slots = get_tool_utils(self.tool_format).get_function_slots() + self.slots

    @override
    def apply(self, **kwargs) -> SLOTS:
        content = kwargs.pop("content")
        functions: List[Tuple[str, str]] = []
        try:
            tool_calls = json.loads(content)
            if not isinstance(tool_calls, list):  # parallel function call
                tool_calls = [tool_calls]

            for tool_call in tool_calls:
                functions.append((tool_call["name"], json.dumps(tool_call["arguments"], ensure_ascii=False)))

        except json.JSONDecodeError:
            raise RuntimeError("Invalid JSON format in function message: {}".format(str([content])))  # flat string

        elements = []
        for name, arguments in functions:
            for slot in self.slots:
                if isinstance(slot, str):
                    slot = slot.replace("{{name}}", name).replace("{{arguments}}", arguments)
                    elements.append(slot)
                elif isinstance(slot, (dict, set)):
                    elements.append(slot)
                else:
                    raise RuntimeError("Input must be string, set[str] or dict[str, str], got {}".format(type(slot)))

        return elements


@dataclass
class ToolFormatter(Formatter):
    def __post_init__(self):
        self.tool_utils = get_tool_utils(self.tool_format)

    @override
    def apply(self, **kwargs) -> SLOTS:
        content = kwargs.pop("content")
        try:
            tools = json.loads(content)
            return [self.tool_utils.tool_formatter(tools) if len(tools) != 0 else ""]
        except json.JSONDecodeError:
            raise RuntimeError("Invalid JSON format in tool description: {}".format(str([content])))  # flat string

    @override
    def extract(self, content: str) -> Union[str, List["FunctionCall"]]:
        return self.tool_utils.tool_extractor(content)
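To illustrate the slot substitution performed by apply, a small sketch; the slot string below is illustrative and not tied to any particular chat template shipped with the project.

from llamafactory.data.formatter import StringFormatter

# One string slot with one placeholder; __post_init__ verifies a placeholder exists.
formatter = StringFormatter(slots=["<|user|>\n{{content}}\n<|assistant|>\n"])
print(formatter.apply(content="What is LoRA?"))
# -> ['<|user|>\nWhat is LoRA?\n<|assistant|>\n']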
LLaMA-Factory/src/llamafactory/data/loader.py
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from typing import TYPE_CHECKING, Dict, Literal, Optional, Sequence, Union

import numpy as np
from datasets import DatasetDict, load_dataset, load_from_disk
from transformers.utils.versions import require_version

from ..extras.constants import FILEEXT2TYPE
from ..extras.logging import get_logger
from ..extras.misc import has_tokenized_data
from .aligner import align_dataset
from .data_utils import merge_dataset, split_dataset
from .parser import get_dataset_list
from .preprocess import get_preprocess_and_print_func


if TYPE_CHECKING:
    from datasets import Dataset, IterableDataset
    from transformers import PreTrainedTokenizer, ProcessorMixin, Seq2SeqTrainingArguments

    from ..hparams import DataArguments, ModelArguments
    from .data_utils import DatasetModule
    from .parser import DatasetAttr
    from .template import Template


logger = get_logger(__name__)


def _load_single_dataset(
    dataset_attr: "DatasetAttr",
    model_args: "ModelArguments",
    data_args: "DataArguments",
    training_args: "Seq2SeqTrainingArguments",
) -> Union["Dataset", "IterableDataset"]:
    r"""
    Loads a single dataset and aligns it to the standard format.
    """
    logger.info("Loading dataset {}...".format(dataset_attr))
    data_path, data_name, data_dir, data_files = None, None, None, None
    if dataset_attr.load_from in ["hf_hub", "ms_hub"]:
        data_path = dataset_attr.dataset_name
        data_name = dataset_attr.subset
        data_dir = dataset_attr.folder

    elif dataset_attr.load_from == "script":
        data_path = os.path.join(data_args.dataset_dir, dataset_attr.dataset_name)
        data_name = dataset_attr.subset
        data_dir = dataset_attr.folder

    elif dataset_attr.load_from == "file":
        data_files = []
        local_path = os.path.join(data_args.dataset_dir, dataset_attr.dataset_name)
        if os.path.isdir(local_path):  # is directory
            for file_name in os.listdir(local_path):
                data_files.append(os.path.join(local_path, file_name))
                if data_path is None:
                    data_path = FILEEXT2TYPE.get(file_name.split(".")[-1], None)
                elif data_path != FILEEXT2TYPE.get(file_name.split(".")[-1], None):
                    raise ValueError("File types should be identical.")
        elif os.path.isfile(local_path):  # is file
            data_files.append(local_path)
            data_path = FILEEXT2TYPE.get(local_path.split(".")[-1], None)
        else:
            raise ValueError("File {} not found.".format(local_path))

        if data_path is None:
            raise ValueError("Allowed file types: {}.".format(",".join(FILEEXT2TYPE.keys())))
    else:
        raise NotImplementedError("Unknown load type: {}.".format(dataset_attr.load_from))

    if dataset_attr.load_from == "ms_hub":
        require_version("modelscope>=1.11.0", "To fix: pip install modelscope>=1.11.0")
        from modelscope import MsDataset
        from modelscope.utils.config_ds import MS_DATASETS_CACHE

        cache_dir = model_args.cache_dir or MS_DATASETS_CACHE
        dataset = MsDataset.load(
            dataset_name=data_path,
            subset_name=data_name,
            data_dir=data_dir,
            data_files=data_files,
            split=dataset_attr.split,
            cache_dir=cache_dir,
            token=model_args.ms_hub_token,
            use_streaming=(data_args.streaming and (dataset_attr.load_from != "file")),
        )
        if isinstance(dataset, MsDataset):
            dataset = dataset.to_hf_dataset()
    else:
        dataset = load_dataset(
            path=data_path,
            name=data_name,
            data_dir=data_dir,
            data_files=data_files,
            split=dataset_attr.split,
            cache_dir=model_args.cache_dir,
            token=model_args.hf_hub_token,
            streaming=(data_args.streaming and (dataset_attr.load_from != "file")),
            trust_remote_code=True,
        )

    if data_args.streaming and (dataset_attr.load_from == "file"):  # faster than specifying streaming=True
        dataset = dataset.to_iterable_dataset()  # TODO: add num shards parameter

    if dataset_attr.num_samples is not None and not data_args.streaming:
        target_num = dataset_attr.num_samples
        indexes = np.random.permutation(len(dataset))[:target_num]  # all samples should be included
        target_num -= len(indexes)
        if target_num > 0:
            expand_indexes = np.random.choice(len(dataset), target_num)
            indexes = np.concatenate((indexes, expand_indexes), axis=0)

        assert len(indexes) == dataset_attr.num_samples, "Sample num mismatched."
        dataset = dataset.select(indexes)
        logger.info("Sampled {} examples from dataset {}.".format(dataset_attr.num_samples, dataset_attr))

    if data_args.max_samples is not None:  # truncate dataset
        max_samples = min(data_args.max_samples, len(dataset))
        dataset = dataset.select(range(max_samples))

    return align_dataset(dataset, dataset_attr, data_args, training_args)


def _get_merged_dataset(
    dataset_names: Optional[Sequence[str]],
    model_args: "ModelArguments",
    data_args: "DataArguments",
    training_args: "Seq2SeqTrainingArguments",
    stage: Literal["pt", "sft", "rm", "ppo", "kto"],
) -> Optional[Union["Dataset", "IterableDataset"]]:
    r"""
    Gets the merged datasets in the standard format.
    """
    if dataset_names is None:
        return None

    datasets = []
    for dataset_attr in get_dataset_list(dataset_names, data_args.dataset_dir):
        if (stage == "rm" and dataset_attr.ranking is False) or (stage != "rm" and dataset_attr.ranking is True):
            raise ValueError("The dataset is not applicable in the current training stage.")

        datasets.append(_load_single_dataset(dataset_attr, model_args, data_args, training_args))

    return merge_dataset(datasets, data_args, seed=training_args.seed)


def _get_preprocessed_dataset(
    dataset: Optional[Union["Dataset", "IterableDataset"]],
    data_args: "DataArguments",
    training_args: "Seq2SeqTrainingArguments",
    stage: Literal["pt", "sft", "rm", "ppo", "kto"],
    template: "Template",
    tokenizer: "PreTrainedTokenizer",
    processor: Optional["ProcessorMixin"] = None,
    is_eval: bool = False,
) -> Optional[Union["Dataset", "IterableDataset"]]:
    r"""
    Preprocesses the dataset, including format checking and tokenization.
    """
    if dataset is None:
        return None

    preprocess_func, print_function = get_preprocess_and_print_func(
        data_args, stage, template, tokenizer, processor, do_generate=(training_args.predict_with_generate and is_eval)
    )
    column_names = list(next(iter(dataset)).keys())
    kwargs = {}
    if not data_args.streaming:
        kwargs = dict(
            num_proc=data_args.preprocessing_num_workers,
            load_from_cache_file=(not data_args.overwrite_cache) or (training_args.local_process_index != 0),
            desc="Running tokenizer on dataset",
        )

    dataset = dataset.map(
        preprocess_func,
        batched=True,
        batch_size=data_args.preprocessing_batch_size,
        remove_columns=column_names,
        **kwargs,
    )

    if training_args.should_log:
        try:
            print("eval example:" if is_eval else "training example:")
            print_function(next(iter(dataset)))
        except StopIteration:
            if stage == "pt":
                raise RuntimeError("Cannot find sufficient samples, consider increasing dataset size.")
            else:
                raise RuntimeError("Cannot find valid samples, check `data/README.md` for the data format.")

    return dataset


def get_dataset(
    template: "Template",
    model_args: "ModelArguments",
    data_args: "DataArguments",
    training_args: "Seq2SeqTrainingArguments",
    stage: Literal["pt", "sft", "rm", "ppo", "kto"],
    tokenizer: "PreTrainedTokenizer",
    processor: Optional["ProcessorMixin"] = None,
) -> "DatasetModule":
    r"""
    Gets the train dataset and optionally gets the evaluation dataset.
    """
    # Load tokenized dataset
    if data_args.tokenized_path is not None:
        if has_tokenized_data(data_args.tokenized_path):
            logger.warning("Loading dataset from disk will ignore other data arguments.")
            dataset_dict: "DatasetDict" = load_from_disk(data_args.tokenized_path)
            logger.info("Loaded tokenized dataset from {}.".format(data_args.tokenized_path))

            dataset_module: Dict[str, "Dataset"] = {}
            if "train" in dataset_dict:
                dataset_module["train_dataset"] = dataset_dict["train"]

            if "validation" in dataset_dict:
                dataset_module["eval_dataset"] = dataset_dict["validation"]

            if data_args.streaming:
                dataset_module = {k: v.to_iterable_dataset() for k, v in dataset_module.items()}

            return dataset_module

        if data_args.streaming:
            raise ValueError("Turn off `streaming` when saving dataset to disk.")

    # Load and preprocess dataset
    with training_args.main_process_first(desc="load dataset"):
        dataset = _get_merged_dataset(data_args.dataset, model_args, data_args, training_args, stage)
        eval_dataset = _get_merged_dataset(data_args.eval_dataset, model_args, data_args, training_args, stage)

    with training_args.main_process_first(desc="pre-process dataset"):
        dataset = _get_preprocessed_dataset(
            dataset, data_args, training_args, stage, template, tokenizer, processor, is_eval=False
        )
        eval_dataset = _get_preprocessed_dataset(
            eval_dataset, data_args, training_args, stage, template, tokenizer, processor, is_eval=True
        )

        if data_args.val_size > 1e-6:
            dataset_dict = split_dataset(dataset, data_args, seed=training_args.seed)
        else:
            dataset_dict = {}
            if dataset is not None:
                if data_args.streaming:
                    dataset = dataset.shuffle(buffer_size=data_args.buffer_size, seed=training_args.seed)

                dataset_dict["train"] = dataset

            if eval_dataset is not None:
                if data_args.streaming:
                    eval_dataset = eval_dataset.shuffle(buffer_size=data_args.buffer_size, seed=training_args.seed)

                dataset_dict["validation"] = eval_dataset

            dataset_dict = DatasetDict(dataset_dict)

        if data_args.tokenized_path is not None:
            if training_args.should_save:
                dataset_dict.save_to_disk(data_args.tokenized_path)
                logger.info("Tokenized dataset saved at {}.".format(data_args.tokenized_path))
                logger.info("Please restart the training with `tokenized_path: {}`.".format(data_args.tokenized_path))

            sys.exit(0)

        dataset_module = {}
        if "train" in dataset_dict:
            dataset_module["train_dataset"] = dataset_dict["train"]

        if "validation" in dataset_dict:
            dataset_module["eval_dataset"] = dataset_dict["validation"]

        return dataset_module
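The load_from == "file" branch above keys the datasets builder off the file extension via FILEEXT2TYPE and requires every file in a directory to map to the same builder. A self-contained sketch of that resolution rule, with a hypothetical mapping in place of the real constant:

from typing import Dict, List, Optional

# Hypothetical stand-in for ..extras.constants.FILEEXT2TYPE.
FILEEXT2TYPE: Dict[str, str] = {"json": "json", "jsonl": "json", "csv": "csv", "txt": "text"}


def resolve_builder(file_names: List[str]) -> Optional[str]:
    """Mirror the loader's rule: every file must map to the same builder type."""
    builder = None
    for file_name in file_names:
        file_type = FILEEXT2TYPE.get(file_name.split(".")[-1], None)
        if builder is None:
            builder = file_type
        elif builder != file_type:
            raise ValueError("File types should be identical.")

    return builder


print(resolve_builder(["alpaca_part1.json", "alpaca_part2.json"]))  # json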
LLaMA-Factory/src/llamafactory/data/mm_plugin.py
import
math
from
copy
import
deepcopy
from
io
import
BytesIO
from
typing
import
TYPE_CHECKING
,
Dict
,
List
,
Optional
,
Sequence
,
Tuple
,
TypedDict
,
Union
import
numpy
as
np
from
transformers.image_utils
import
get_image_size
,
to_numpy_array
from
typing_extensions
import
override
from
..extras.constants
import
IGNORE_INDEX
,
IMAGE_PLACEHOLDER
,
VIDEO_PLACEHOLDER
from
..extras.packages
import
is_pillow_available
,
is_pyav_available
if
is_pillow_available
():
from
PIL
import
Image
from
PIL.Image
import
Image
as
ImageObject
if
is_pyav_available
():
import
av
if
TYPE_CHECKING
:
import
torch
from
av.stream
import
Stream
from
transformers
import
PreTrainedTokenizer
,
ProcessorMixin
from
transformers.image_processing_utils
import
BaseImageProcessor
class
EncodedImage
(
TypedDict
):
path
:
Optional
[
str
]
bytes
:
Optional
[
bytes
]
ImageInput
=
Union
[
str
,
EncodedImage
,
ImageObject
]
VideoInput
=
str
def
_get_paligemma_token_type_ids
(
imglens
:
Sequence
[
int
],
seqlens
:
Sequence
[
int
],
processor
:
"ProcessorMixin"
)
->
List
[
List
[
int
]]:
r
"""
Gets paligemma token type ids for computing loss.
Returns:
batch_token_type_ids: shape (batch_size, sequence_length)
"""
batch_token_type_ids
=
[]
for
imglen
,
seqlen
in
zip
(
imglens
,
seqlens
):
image_seqlen
=
imglen
*
getattr
(
processor
,
"image_seqlen"
)
batch_token_type_ids
.
append
([
0
]
*
image_seqlen
+
[
1
]
*
(
seqlen
-
image_seqlen
))
return
batch_token_type_ids
class
BasePlugin
:
def
__init__
(
self
,
image_token
:
Optional
[
str
],
video_token
:
Optional
[
str
])
->
None
:
self
.
image_token
=
image_token
self
.
video_token
=
video_token
def
_validate_input
(
self
,
images
:
Sequence
[
"ImageInput"
],
videos
:
Sequence
[
"VideoInput"
],
)
->
None
:
r
"""
Validates if this model accepts the input modalities.
"""
if
len
(
images
)
!=
0
and
self
.
image_token
is
None
:
raise
ValueError
(
"This model does not support image input."
)
if
len
(
videos
)
!=
0
and
self
.
video_token
is
None
:
raise
ValueError
(
"This model does not support video input."
)
def
_preprocess_image
(
self
,
image
:
"ImageObject"
,
**
kwargs
)
->
"ImageObject"
:
r
"""
Pre-processes a single image.
"""
image_resolution
:
int
=
kwargs
.
get
(
"image_resolution"
)
if
max
(
image
.
width
,
image
.
height
)
>
image_resolution
:
resize_factor
=
image_resolution
/
max
(
image
.
width
,
image
.
height
)
width
,
height
=
int
(
image
.
width
*
resize_factor
),
int
(
image
.
height
*
resize_factor
)
image
=
image
.
resize
((
width
,
height
),
resample
=
Image
.
NEAREST
)
if
image
.
mode
!=
"RGB"
:
image
=
image
.
convert
(
"RGB"
)
return
image
def
_get_video_sample_frames
(
self
,
video_stream
:
"Stream"
,
**
kwargs
)
->
int
:
r
"""
Computes video sample frames according to fps.
"""
video_fps
:
float
=
kwargs
.
get
(
"video_fps"
)
video_maxlen
:
int
=
kwargs
.
get
(
"video_maxlen"
)
total_frames
=
video_stream
.
frames
sample_frames
=
float
(
video_stream
.
duration
*
video_stream
.
time_base
)
*
video_fps
sample_frames
=
min
(
total_frames
,
video_maxlen
,
sample_frames
)
return
math
.
floor
(
sample_frames
)
def
_regularize_images
(
self
,
images
:
Sequence
[
"ImageInput"
],
**
kwargs
)
->
List
[
"ImageObject"
]:
r
"""
Regularizes images to avoid error. Including reading and pre-processing.
"""
results
=
[]
for
image
in
images
:
if
isinstance
(
image
,
str
):
image
=
Image
.
open
(
image
)
elif
isinstance
(
image
,
dict
):
if
image
[
"bytes"
]
is
not
None
:
image
=
Image
.
open
(
BytesIO
(
image
[
"bytes"
]))
else
:
image
=
Image
.
open
(
image
[
"path"
])
if
not
isinstance
(
image
,
ImageObject
):
raise
ValueError
(
"Expect input is a list of Images, but got {}."
.
format
(
type
(
image
)))
results
.
append
(
self
.
_preprocess_image
(
image
,
**
kwargs
))
return
results
def
_regularize_videos
(
self
,
videos
:
Sequence
[
"VideoInput"
],
**
kwargs
)
->
List
[
List
[
"ImageObject"
]]:
r
"""
Regularizes videos to avoid error. Including reading, resizing and converting.
"""
results
=
[]
for
video
in
videos
:
container
=
av
.
open
(
video
,
"r"
)
video_stream
=
next
(
stream
for
stream
in
container
.
streams
if
stream
.
type
==
"video"
)
total_frames
=
video_stream
.
frames
sample_frames
=
self
.
_get_video_sample_frames
(
video_stream
,
**
kwargs
)
sample_indices
=
np
.
linspace
(
0
,
total_frames
-
1
,
sample_frames
).
astype
(
np
.
int32
)
frames
:
List
[
"ImageObject"
]
=
[]
container
.
seek
(
0
)
for
frame_idx
,
frame
in
enumerate
(
container
.
decode
(
video_stream
)):
if
frame_idx
in
sample_indices
:
frames
.
append
(
frame
.
to_image
())
frames
=
self
.
_regularize_images
(
frames
,
**
kwargs
)
results
.
append
(
frames
)
return
results
def
_get_mm_inputs
(
self
,
images
:
Sequence
[
"ImageInput"
],
videos
:
Sequence
[
"VideoInput"
],
processor
:
"ProcessorMixin"
,
)
->
Dict
[
str
,
"torch.Tensor"
]:
r
"""
Processes visual inputs.
Returns: (llava and paligemma)
pixel_values: tensor with shape (B, C, H, W)
Returns: (qwen2-vl)
pixel_values: tensor with shape (num_patches, patch_dim)
image_grid_thw: tensor with shape (num_images, 3), where the three numbers are time, width, height
It holds num_patches == torch.prod(image_grid_thw)
"""
image_processor
:
"BaseImageProcessor"
=
getattr
(
processor
,
"image_processor"
)
video_processor
:
"BaseImageProcessor"
=
getattr
(
processor
,
"video_processor"
,
image_processor
)
input_dict
=
{
"images"
:
None
}
# default key
if
len
(
images
)
!=
0
:
images
=
self
.
_regularize_images
(
images
,
image_resolution
=
getattr
(
processor
,
"image_resolution"
,
512
),
)
input_dict
[
"images"
]
=
images
if
len
(
videos
)
!=
0
:
videos
=
self
.
_regularize_videos
(
videos
,
image_resolution
=
getattr
(
processor
,
"video_resolution"
,
128
),
video_fps
=
getattr
(
processor
,
"video_fps"
,
1.0
),
video_maxlen
=
getattr
(
processor
,
"video_maxlen"
,
64
),
)
input_dict
[
"videos"
]
=
videos
mm_inputs
=
{}
if
image_processor
!=
video_processor
:
if
input_dict
.
get
(
"images"
)
is
not
None
:
mm_inputs
.
update
(
image_processor
(
input_dict
[
"images"
],
return_tensors
=
"pt"
))
if
input_dict
.
get
(
"videos"
)
is
not
None
:
mm_inputs
.
update
(
video_processor
(
input_dict
[
"videos"
],
return_tensors
=
"pt"
))
elif
input_dict
.
get
(
"images"
)
is
not
None
or
input_dict
.
get
(
"videos"
)
is
not
None
:
# same processor (qwen2-vl)
mm_inputs
.
update
(
image_processor
(
**
input_dict
,
return_tensors
=
"pt"
))
return
mm_inputs
def
process_messages
(
self
,
messages
:
Sequence
[
Dict
[
str
,
str
]],
images
:
Sequence
[
"ImageInput"
],
videos
:
Sequence
[
"VideoInput"
],
processor
:
Optional
[
"ProcessorMixin"
],
)
->
List
[
Dict
[
str
,
str
]]:
r
"""
Pre-processes input messages before tokenization for VLMs.
"""
self
.
_validate_input
(
images
,
videos
)
return
messages
def
process_token_ids
(
self
,
input_ids
:
List
[
int
],
labels
:
Optional
[
List
[
int
]],
images
:
Sequence
[
"ImageInput"
],
videos
:
Sequence
[
"VideoInput"
],
tokenizer
:
"PreTrainedTokenizer"
,
processor
:
Optional
[
"ProcessorMixin"
],
)
->
Tuple
[
List
[
int
],
Optional
[
List
[
int
]]]:
r
"""
Pre-processes token ids after tokenization for VLMs.
"""
self
.
_validate_input
(
images
,
videos
)
return
input_ids
,
labels
def
get_mm_inputs
(
self
,
images
:
Sequence
[
"ImageInput"
],
videos
:
Sequence
[
"VideoInput"
],
imglens
:
Sequence
[
int
],
vidlens
:
Sequence
[
int
],
seqlens
:
Sequence
[
int
],
processor
:
Optional
[
"ProcessorMixin"
],
)
->
Dict
[
str
,
Union
[
List
[
int
],
"torch.Tensor"
]]:
r
"""
Builds batched multimodal inputs for VLMs.
"""
self
.
_validate_input
(
images
,
videos
)
return
{}
class
LlavaPlugin
(
BasePlugin
):
@
override
def
process_messages
(
self
,
messages
:
Sequence
[
Dict
[
str
,
str
]],
images
:
Sequence
[
"ImageInput"
],
videos
:
Sequence
[
"VideoInput"
],
processor
:
Optional
[
"ProcessorMixin"
],
)
->
List
[
Dict
[
str
,
str
]]:
self
.
_validate_input
(
images
,
videos
)
num_image_tokens
=
0
image_seqlen
=
getattr
(
processor
,
"image_seqlen"
)
messages
=
deepcopy
(
messages
)
for
message
in
messages
:
content
=
message
[
"content"
]
while
IMAGE_PLACEHOLDER
in
content
:
num_image_tokens
+=
1
content
=
content
.
replace
(
IMAGE_PLACEHOLDER
,
"{{image}}"
,
1
)
message
[
"content"
]
=
content
.
replace
(
"{{image}}"
,
self
.
image_token
*
image_seqlen
)
if
len
(
images
)
!=
num_image_tokens
:
raise
ValueError
(
"The number of images does not match the number of {} tokens"
.
format
(
IMAGE_PLACEHOLDER
))
return
messages
@
override
def
get_mm_inputs
(
self
,
images
:
Sequence
[
"ImageInput"
],
videos
:
Sequence
[
"VideoInput"
],
imglens
:
Sequence
[
int
],
vidlens
:
Sequence
[
int
],
seqlens
:
Sequence
[
int
],
processor
:
Optional
[
"ProcessorMixin"
],
)
->
Dict
[
str
,
Union
[
List
[
int
],
"torch.Tensor"
]]:
self
.
_validate_input
(
images
,
videos
)
return
self
.
_get_mm_inputs
(
images
,
videos
,
processor
)
class
LlavaNextPlugin
(
BasePlugin
):
@
override
def
process_messages
(
self
,
messages
:
Sequence
[
Dict
[
str
,
str
]],
images
:
Sequence
[
"ImageInput"
],
videos
:
Sequence
[
"VideoInput"
],
processor
:
Optional
[
"ProcessorMixin"
],
)
->
List
[
Dict
[
str
,
str
]]:
self
.
_validate_input
(
images
,
videos
)
num_image_tokens
=
0
messages
=
deepcopy
(
messages
)
mm_inputs
=
self
.
_get_mm_inputs
(
images
,
videos
,
processor
)
if
"image_sizes"
in
mm_inputs
:
image_sizes
=
iter
(
mm_inputs
[
"image_sizes"
])
if
"pixel_values"
in
mm_inputs
:
height
,
width
=
get_image_size
(
to_numpy_array
(
mm_inputs
[
"pixel_values"
][
0
][
0
]))
for
message
in
messages
:
content
=
message
[
"content"
]
while
self
.
image_token
in
content
:
image_size
=
next
(
image_sizes
)
orig_height
,
orig_width
=
image_size
image_seqlen
=
processor
.
_get_number_of_features
(
orig_height
,
orig_width
,
height
,
width
)
if
processor
.
vision_feature_select_strategy
==
"default"
:
image_seqlen
-=
1
num_image_tokens
+=
1
content
=
content
.
replace
(
self
.
image_token
,
"{{image}}"
*
image_seqlen
,
1
)
message
[
"content"
]
=
content
.
replace
(
"{{image}}"
,
self
.
image_token
)
if
len
(
images
)
!=
num_image_tokens
:
raise
ValueError
(
"The number of images does not match the number of {} tokens"
.
format
(
IMAGE_PLACEHOLDER
))
return
messages
@
override
def
get_mm_inputs
(
self
,
images
:
Sequence
[
"ImageInput"
],
videos
:
Sequence
[
"VideoInput"
],
imglens
:
Sequence
[
int
],
vidlens
:
Sequence
[
int
],
seqlens
:
Sequence
[
int
],
processor
:
Optional
[
"ProcessorMixin"
],
)
->
Dict
[
str
,
Union
[
List
[
int
],
"torch.Tensor"
]]:
self
.
_validate_input
(
images
,
videos
)
res
=
self
.
_get_mm_inputs
(
images
,
videos
,
processor
)
return
res
class
LlavaNextVideoPlugin
(
BasePlugin
):
@
override
def
process_messages
(
self
,
messages
:
Sequence
[
Dict
[
str
,
str
]],
images
:
Sequence
[
"ImageInput"
],
videos
:
Sequence
[
"VideoInput"
],
processor
:
Optional
[
"ProcessorMixin"
],
)
->
List
[
Dict
[
str
,
str
]]:
self
.
_validate_input
(
images
,
videos
)
num_image_tokens
=
0
num_video_tokens
=
0
messages
=
deepcopy
(
messages
)
mm_inputs
=
self
.
_get_mm_inputs
(
images
,
videos
,
processor
)
if
"pixel_values"
in
mm_inputs
:
image_sizes
=
iter
(
mm_inputs
[
"image_sizes"
])
height
,
width
=
get_image_size
(
to_numpy_array
(
mm_inputs
[
"pixel_values"
][
0
][
0
]))
for
message
in
messages
:
content
=
message
[
"content"
]
while
self
.
image_token
in
content
:
image_size
=
next
(
image_sizes
)
orig_height
,
orig_width
=
image_size
image_seqlen
=
processor
.
_get_number_of_features
(
orig_height
,
orig_width
,
height
,
width
)
if
processor
.
vision_feature_select_strategy
==
"default"
:
image_seqlen
-=
1
num_image_tokens
+=
1
content
=
content
.
replace
(
self
.
image_token
,
"{{image}}"
*
image_seqlen
,
1
)
message
[
"content"
]
=
content
.
replace
(
"{{image}}"
,
self
.
image_token
)
if
"pixel_values_videos"
in
mm_inputs
:
pixel_values_video
=
to_numpy_array
(
mm_inputs
.
get
(
"pixel_values_videos"
)[
0
])
height
,
width
=
get_image_size
(
pixel_values_video
[
0
])
num_frames
=
pixel_values_video
.
shape
[
0
]
# frame dim is always after batch dim
image_seqlen
=
(
height
//
processor
.
patch_size
)
*
(
width
//
processor
.
patch_size
)
video_seqlen
=
image_seqlen
//
4
*
num_frames
# divide by 4 needed for avg pooling layer
for
message
in
messages
:
content
=
message
[
"content"
]
while
self
.
video_token
in
content
:
num_video_tokens
+=
1
content
=
content
.
replace
(
self
.
video_token
,
"{{video}}"
,
1
)
message
[
"content"
]
=
content
.
replace
(
"{{video}}"
,
self
.
video_token
*
video_seqlen
)
if
len
(
images
)
!=
num_image_tokens
:
raise
ValueError
(
"The number of images does not match the number of {} tokens"
.
format
(
IMAGE_PLACEHOLDER
))
if
len
(
videos
)
!=
num_video_tokens
:
raise
ValueError
(
"The number of videos does not match the number of {} tokens"
.
format
(
IMAGE_PLACEHOLDER
))
return
messages
@
override
def
get_mm_inputs
(
self
,
images
:
Sequence
[
"ImageInput"
],
videos
:
Sequence
[
"VideoInput"
],
imglens
:
Sequence
[
int
],
vidlens
:
Sequence
[
int
],
seqlens
:
Sequence
[
int
],
processor
:
Optional
[
"ProcessorMixin"
],
)
->
Dict
[
str
,
Union
[
List
[
int
],
"torch.Tensor"
]]:
self
.
_validate_input
(
images
,
videos
)
return
self
.
_get_mm_inputs
(
images
,
videos
,
processor
)
class
PaliGemmaPlugin
(
BasePlugin
):
@
override
def
process_messages
(
self
,
messages
:
Sequence
[
Dict
[
str
,
str
]],
images
:
Sequence
[
"ImageInput"
],
videos
:
Sequence
[
"VideoInput"
],
processor
:
Optional
[
"ProcessorMixin"
],
)
->
List
[
Dict
[
str
,
str
]]:
self
.
_validate_input
(
images
,
videos
)
num_image_tokens
=
0
messages
=
deepcopy
(
messages
)
for
message
in
messages
:
content
=
message
[
"content"
]
while
IMAGE_PLACEHOLDER
in
content
:
num_image_tokens
+=
1
content
=
content
.
replace
(
IMAGE_PLACEHOLDER
,
"{{image}}"
,
1
)
message
[
"content"
]
=
content
.
replace
(
"{{image}}"
,
""
)
if
len
(
images
)
!=
num_image_tokens
:
raise
ValueError
(
"The number of images does not match the number of {} tokens"
.
format
(
IMAGE_PLACEHOLDER
))
return
messages
@
override
def
process_token_ids
(
self
,
input_ids
:
List
[
int
],
labels
:
Optional
[
List
[
int
]],
images
:
Sequence
[
"ImageInput"
],
videos
:
Sequence
[
"VideoInput"
],
tokenizer
:
"PreTrainedTokenizer"
,
processor
:
Optional
[
"ProcessorMixin"
],
)
->
Tuple
[
List
[
int
],
Optional
[
List
[
int
]]]:
self
.
_validate_input
(
images
,
videos
)
num_images
=
len
(
images
)
image_seqlen
=
num_images
*
getattr
(
processor
,
"image_seqlen"
)
image_token_id
=
tokenizer
.
convert_tokens_to_ids
(
self
.
image_token
)
input_ids
=
[
image_token_id
]
*
image_seqlen
+
input_ids
if
labels
is
not
None
:
labels
=
[
IGNORE_INDEX
]
*
image_seqlen
+
labels
return
input_ids
,
labels
@
override
def
get_mm_inputs
(
self
,
images
:
Sequence
[
"ImageInput"
],
videos
:
Sequence
[
"VideoInput"
],
imglens
:
Sequence
[
int
],
vidlens
:
Sequence
[
int
],
seqlens
:
Sequence
[
int
],
processor
:
Optional
[
"ProcessorMixin"
],
)
->
Dict
[
str
,
Union
[
List
[
int
],
"torch.Tensor"
]]:
self
.
_validate_input
(
images
,
videos
)
mm_inputs
=
self
.
_get_mm_inputs
(
images
,
videos
,
processor
)
mm_inputs
[
"token_type_ids"
]
=
_get_paligemma_token_type_ids
(
imglens
,
seqlens
,
processor
)
return
mm_inputs
class
Qwen2vlPlugin
(
BasePlugin
):
@
override
def
_preprocess_image
(
self
,
image
:
"ImageObject"
,
**
kwargs
)
->
"ImageObject"
:
image
=
super
().
_preprocess_image
(
image
,
**
kwargs
)
if
min
(
image
.
width
,
image
.
height
)
<
28
:
width
,
height
=
max
(
image
.
width
,
28
),
max
(
image
.
height
,
28
)
image
=
image
.
resize
((
width
,
height
),
resample
=
Image
.
NEAREST
)
if
image
.
width
/
image
.
height
>
200
:
width
,
height
=
image
.
height
*
180
,
image
.
height
image
=
image
.
resize
((
width
,
height
),
resample
=
Image
.
NEAREST
)
if
image
.
height
/
image
.
width
>
200
:
width
,
height
=
image
.
width
,
image
.
width
*
180
image
=
image
.
resize
((
width
,
height
),
resample
=
Image
.
NEAREST
)
return
image
@
override
def
_get_video_sample_frames
(
self
,
video_stream
:
"Stream"
,
**
kwargs
)
->
int
:
sample_frames
=
super
().
_get_video_sample_frames
(
video_stream
,
**
kwargs
)
sample_frames
=
sample_frames
//
2
*
2
return
sample_frames
@
override
def
process_messages
(
self
,
messages
:
Sequence
[
Dict
[
str
,
str
]],
images
:
Sequence
[
"ImageInput"
],
videos
:
Sequence
[
"VideoInput"
],
processor
:
Optional
[
"ProcessorMixin"
],
)
->
List
[
Dict
[
str
,
str
]]:
self
.
_validate_input
(
images
,
videos
)
image_processor
:
"BaseImageProcessor"
=
getattr
(
processor
,
"image_processor"
)
merge_length
:
int
=
getattr
(
image_processor
,
"merge_size"
)
**
2
mm_inputs
=
self
.
_get_mm_inputs
(
images
,
videos
,
processor
)
image_grid_thw
=
mm_inputs
.
get
(
"image_grid_thw"
,
[])
video_grid_thw
=
mm_inputs
.
get
(
"video_grid_thw"
,
[])
num_image_tokens
,
num_video_tokens
=
0
,
0
messages
=
deepcopy
(
messages
)
for
message
in
messages
:
content
=
message
[
"content"
]
while
IMAGE_PLACEHOLDER
in
content
:
if
num_image_tokens
>=
len
(
image_grid_thw
):
raise
ValueError
(
"`len(images)` is less than the number of {} tokens."
.
format
(
IMAGE_PLACEHOLDER
))
content
=
content
.
replace
(
IMAGE_PLACEHOLDER
,
"<|vision_start|>{}<|vision_end|>"
.
format
(
self
.
image_token
*
(
image_grid_thw
[
num_image_tokens
].
prod
()
//
merge_length
)
),
1
,
)
num_image_tokens
+=
1
while
VIDEO_PLACEHOLDER
in
content
:
if
num_video_tokens
>=
len
(
video_grid_thw
):
raise
ValueError
(
"`len(videos)` is less than the number of {} tokens."
.
format
(
VIDEO_PLACEHOLDER
))
content
=
content
.
replace
(
VIDEO_PLACEHOLDER
,
"<|vision_start|>{}<|vision_end|>"
.
format
(
self
.
video_token
*
(
video_grid_thw
[
num_video_tokens
].
prod
()
//
merge_length
)
),
1
,
)
num_video_tokens
+=
1
message
[
"content"
]
=
content
if
len
(
images
)
!=
num_image_tokens
:
raise
ValueError
(
"The number of images does not match the number of {} tokens"
.
format
(
IMAGE_PLACEHOLDER
))
if
len
(
videos
)
!=
num_video_tokens
:
raise
ValueError
(
"The number of videos does not match the number of {} tokens"
.
format
(
VIDEO_PLACEHOLDER
))
return
messages
@
override
def
get_mm_inputs
(
self
,
images
:
Sequence
[
"ImageInput"
],
videos
:
Sequence
[
"VideoInput"
],
imglens
:
Sequence
[
int
],
vidlens
:
Sequence
[
int
],
seqlens
:
Sequence
[
int
],
processor
:
Optional
[
"ProcessorMixin"
],
)
->
Dict
[
str
,
Union
[
List
[
int
],
"torch.Tensor"
]]:
self
.
_validate_input
(
images
,
videos
)
return
self
.
_get_mm_inputs
(
images
,
videos
,
processor
)
class VideoLlavaPlugin(BasePlugin):
    @override
    def process_messages(
        self,
        messages: Sequence[Dict[str, str]],
        images: Sequence["ImageInput"],
        videos: Sequence["VideoInput"],
        processor: Optional["ProcessorMixin"],
    ) -> List[Dict[str, str]]:
        self._validate_input(images, videos)
        num_image_tokens = 0
        num_video_tokens = 0
        messages = deepcopy(messages)
        mm_inputs = self._get_mm_inputs(images, videos, processor)
        num_frames = 0
        exist_images = "pixel_values_images" in mm_inputs
        exist_videos = "pixel_values_videos" in mm_inputs
        if exist_videos or exist_images:
            if exist_images:
                height, width = get_image_size(to_numpy_array(mm_inputs.get("pixel_values_images")[0]))
                num_frames = 1

            if exist_videos:
                pixel_values_video = to_numpy_array(mm_inputs.get("pixel_values_videos")[0])
                height, width = get_image_size(pixel_values_video[0])
                num_frames = pixel_values_video.shape[0]  # frame dim is always after batch dim

            image_seqlen = (height // processor.patch_size) * (width // processor.patch_size) + 1
            video_seqlen = image_seqlen * num_frames
            if processor.vision_feature_select_strategy == "default":
                image_seqlen -= 1

            for message in messages:
                content = message["content"]
                while self.image_token in content:
                    num_image_tokens += 1
                    content = content.replace(self.image_token, "{{image}}", 1)

                while self.video_token in content:
                    num_video_tokens += 1
                    content = content.replace(self.video_token, "{{video}}", 1)

                content = content.replace("{{image}}", self.image_token * image_seqlen)
                message["content"] = content.replace("{{video}}", self.video_token * video_seqlen)

        if len(images) != num_image_tokens:
            raise ValueError("The number of images does not match the number of {} tokens".format(self.image_token))

        if len(videos) != num_video_tokens:
            raise ValueError("The number of videos does not match the number of {} tokens".format(self.video_token))

        return messages

    @override
    def get_mm_inputs(
        self,
        images: Sequence["ImageInput"],
        videos: Sequence["VideoInput"],
        imglens: Sequence[int],
        vidlens: Sequence[int],
        seqlens: Sequence[int],
        processor: Optional["ProcessorMixin"],
    ) -> Dict[str, Union[List[int], "torch.Tensor"]]:
        self._validate_input(images, videos)
        return self._get_mm_inputs(images, videos, processor)
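# Illustrative note (not part of the original file): for Video-LLaVA each image expands to
# (height // patch_size) * (width // patch_size) + 1 tokens, and a video to that per-frame
# value times the number of frames (video_seqlen is computed before the strategy adjustment).
# With a hypothetical 224x224 frame and patch_size = 14: (224 // 14) ** 2 + 1 = 257 tokens per
# frame; under the "default" vision_feature_select_strategy the per-image count drops to 256.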
PLUGINS = {
    "base": BasePlugin,
    "llava": LlavaPlugin,
    "llava_next": LlavaNextPlugin,
    "llava_next_video": LlavaNextVideoPlugin,
    "paligemma": PaliGemmaPlugin,
    "qwen2_vl": Qwen2vlPlugin,
    "video_llava": VideoLlavaPlugin,
}


def get_mm_plugin(
    name: str,
    image_token: Optional[str] = None,
    video_token: Optional[str] = None,
) -> "BasePlugin":
    plugin_class = PLUGINS.get(name, None)
    if plugin_class is None:
        raise ValueError("Multimodal plugin `{}` not found.".format(name))

    return plugin_class(image_token, video_token)
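A minimal usage sketch (not part of the committed file): retrieving the Qwen2-VL plugin from the PLUGINS registry above and letting it expand image placeholders. The token strings and the "<image>" placeholder below are assumptions based on the Qwen2-VL vocabulary and the IMAGE_PLACEHOLDER constant; images, videos, and processor would come from the data pipeline at preprocessing time.

    from llamafactory.data.mm_plugin import get_mm_plugin

    # "qwen2_vl" is a key of the PLUGINS registry defined above.
    plugin = get_mm_plugin("qwen2_vl", image_token="<|image_pad|>", video_token="<|video_pad|>")
    messages = [{"role": "user", "content": "<image>Describe this picture."}]
    # process_messages replaces each placeholder with the expanded vision token span:
    # messages = plugin.process_messages(messages, images, videos, processor)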
LLaMA-Factory/src/llamafactory/data/parser.py
0 → 100644
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, List, Literal, Optional, Sequence

from transformers.utils import cached_file

from ..extras.constants import DATA_CONFIG
from ..extras.misc import use_modelscope


@dataclass
class DatasetAttr:
    r"""
    Dataset attributes.
    """

    # basic configs
    load_from: Literal["hf_hub", "ms_hub", "script", "file"]
    dataset_name: str
    formatting: Literal["alpaca", "sharegpt"] = "alpaca"
    ranking: bool = False
    # extra configs
    subset: Optional[str] = None
    split: str = "train"
    folder: Optional[str] = None
    num_samples: Optional[int] = None
    # common columns
    system: Optional[str] = None
    tools: Optional[str] = None
    images: Optional[str] = None
    videos: Optional[str] = None
    # rlhf columns
    chosen: Optional[str] = None
    rejected: Optional[str] = None
    kto_tag: Optional[str] = None
    # alpaca columns
    prompt: Optional[str] = "instruction"
    query: Optional[str] = "input"
    response: Optional[str] = "output"
    history: Optional[str] = None
    # sharegpt columns
    messages: Optional[str] = "conversations"
    # sharegpt tags
    role_tag: Optional[str] = "from"
    content_tag: Optional[str] = "value"
    user_tag: Optional[str] = "human"
    assistant_tag: Optional[str] = "gpt"
    observation_tag: Optional[str] = "observation"
    function_tag: Optional[str] = "function_call"
    system_tag: Optional[str] = "system"

    def __repr__(self) -> str:
        return self.dataset_name

    def set_attr(self, key: str, obj: Dict[str, Any], default: Optional[Any] = None) -> None:
        setattr(self, key, obj.get(key, default))
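# Illustrative note (not part of the original file): set_attr copies a key from a
# dataset_info.json entry onto the DatasetAttr instance, falling back to `default`
# when the key is absent. For a hypothetical entry {"formatting": "sharegpt"}:
#   attr = DatasetAttr("hf_hub", dataset_name="demo")
#   attr.set_attr("formatting", {"formatting": "sharegpt"}, default="alpaca")
#   attr.formatting  # -> "sharegpt"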
def get_dataset_list(dataset_names: Optional[Sequence[str]], dataset_dir: str) -> List["DatasetAttr"]:
    r"""
    Gets the attributes of the datasets.
    """
    if dataset_names is None:
        dataset_names = []

    if dataset_dir == "ONLINE":
        dataset_info = None
    else:
        if dataset_dir.startswith("REMOTE:"):
            config_path = cached_file(path_or_repo_id=dataset_dir[7:], filename=DATA_CONFIG, repo_type="dataset")
        else:
            config_path = os.path.join(dataset_dir, DATA_CONFIG)

        try:
            with open(config_path, "r") as f:
                dataset_info = json.load(f)
        except Exception as err:
            if len(dataset_names) != 0:
                raise ValueError("Cannot open {} due to {}.".format(config_path, str(err)))

            dataset_info = None

    dataset_list: List["DatasetAttr"] = []
    for name in dataset_names:
        if dataset_info is None:  # dataset_dir is ONLINE
            load_from = "ms_hub" if use_modelscope() else "hf_hub"
            dataset_attr = DatasetAttr(load_from, dataset_name=name)
            dataset_list.append(dataset_attr)
            continue

        if name not in dataset_info:
            raise ValueError("Undefined dataset {} in {}.".format(name, DATA_CONFIG))

        has_hf_url = "hf_hub_url" in dataset_info[name]
        has_ms_url = "ms_hub_url" in dataset_info[name]

        if has_hf_url or has_ms_url:
            if (use_modelscope() and has_ms_url) or (not has_hf_url):
                dataset_attr = DatasetAttr("ms_hub", dataset_name=dataset_info[name]["ms_hub_url"])
            else:
                dataset_attr = DatasetAttr("hf_hub", dataset_name=dataset_info[name]["hf_hub_url"])
        elif "script_url" in dataset_info[name]:
            dataset_attr = DatasetAttr("script", dataset_name=dataset_info[name]["script_url"])
        else:
            dataset_attr = DatasetAttr("file", dataset_name=dataset_info[name]["file_name"])

        dataset_attr.set_attr("formatting", dataset_info[name], default="alpaca")
        dataset_attr.set_attr("ranking", dataset_info[name], default=False)
        dataset_attr.set_attr("subset", dataset_info[name])
        dataset_attr.set_attr("split", dataset_info[name], default="train")
        dataset_attr.set_attr("folder", dataset_info[name])
        dataset_attr.set_attr("num_samples", dataset_info[name])

        if "columns" in dataset_info[name]:
            column_names = ["system", "tools", "images", "videos", "chosen", "rejected", "kto_tag"]
            if dataset_attr.formatting == "alpaca":
                column_names.extend(["prompt", "query", "response", "history"])
            else:
                column_names.extend(["messages"])

            for column_name in column_names:
                dataset_attr.set_attr(column_name, dataset_info[name]["columns"])

        if dataset_attr.formatting == "sharegpt" and "tags" in dataset_info[name]:
            tag_names = (
                "role_tag",
                "content_tag",
                "user_tag",
                "assistant_tag",
                "observation_tag",
                "function_tag",
                "system_tag",
            )
            for tag in tag_names:
                dataset_attr.set_attr(tag, dataset_info[name]["tags"])

        dataset_list.append(dataset_attr)

    return dataset_list
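A minimal usage sketch (not part of the committed file): given a hypothetical dataset description in the dataset directory (assuming DATA_CONFIG resolves to dataset_info.json), get_dataset_list turns each named dataset into a DatasetAttr. The entry name, file name, and column mapping below are illustrative, not taken from the repository.

    # data/dataset_info.json (hypothetical)
    # {
    #   "my_chat": {
    #     "file_name": "my_chat.json",
    #     "formatting": "sharegpt",
    #     "columns": {"messages": "conversations", "images": "images"}
    #   }
    # }

    from llamafactory.data.parser import get_dataset_list

    dataset_list = get_dataset_list(["my_chat"], dataset_dir="data")
    attr = dataset_list[0]
    print(attr.load_from, attr.dataset_name, attr.formatting)  # file my_chat.json sharegpt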