norm / vllm · Commits

Commit 5ffc0d13 (unverified)
Authored Nov 20, 2023 by Simon Mo; committed by GitHub on Nov 20, 2023

Migrate linter from `pylint` to `ruff` (#1665)
Parent: 112627e8
Changes: 45 files. Showing 20 changed files with 24 additions and 79 deletions (+24, -79).
Changed files shown on this page:

  vllm/model_executor/layers/attention.py           +0  -1
  vllm/model_executor/layers/quantization/awq.py    +1  -1
  vllm/model_executor/model_loader.py               +1  -1
  vllm/model_executor/models/aquila.py              +1  -4
  vllm/model_executor/models/baichuan.py            +1  -4
  vllm/model_executor/models/bloom.py               +1  -4
  vllm/model_executor/models/chatglm.py             +1  -4
  vllm/model_executor/models/falcon.py              +1  -4
  vllm/model_executor/models/gpt2.py                +1  -4
  vllm/model_executor/models/gpt_bigcode.py         +1  -4
  vllm/model_executor/models/gpt_j.py               +2  -8
  vllm/model_executor/models/gpt_neox.py            +1  -4
  vllm/model_executor/models/internlm.py            +1  -4
  vllm/model_executor/models/llama.py               +1  -4
  vllm/model_executor/models/mistral.py             +1  -4
  vllm/model_executor/models/mpt.py                 +5  -8
  vllm/model_executor/models/opt.py                 +1  -4
  vllm/model_executor/models/phi_1_5.py             +1  -4
  vllm/model_executor/models/qwen.py                +1  -4
  vllm/model_executor/models/yi.py                  +1  -4
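
Almost every model file below receives the same mechanical change: the four-line `if`/`else` that selects the per-layer CUDA cache event is collapsed into a single conditional expression, presumably to satisfy one of ruff's simplification rules (the linter configuration itself is not among the files shown on this page). A minimal sketch of the pattern, using a hypothetical `pick_cache_event` helper; the real code inlines the expression inside each model's forward loop:

    from typing import List, Optional

    import torch

    def pick_cache_event(cache_events: Optional[List[torch.cuda.Event]],
                         i: int) -> Optional[torch.cuda.Event]:
        # Old style, removed in this commit:
        #     if cache_events is None:
        #         cache_event = None
        #     else:
        #         cache_event = cache_events[i]
        # New style, as written in the hunks below:
        return None if cache_events is None else cache_events[i]
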
vllm/model_executor/layers/attention.py
@@ -18,7 +18,6 @@ _PARTITION_SIZE = 512
 class PagedAttention(nn.Module):
-    # pylint: disable=line-too-long
     """GPT-style multi-head PagedAttention.

     This class takes query, key, and value tensors as input. The input tensors
...

vllm/model_executor/layers/quantization/awq.py
@@ -50,7 +50,7 @@ class AWQConfig(QuantizationConfig):
     def get_config_filenames() -> List[str]:
         return [
             "quant_config.json",  # E.g., casperhansen/vicuna-7b-v1.5-awq
-            "quantize_config.json",  # E.g., abhinavkulkarni/mosaicml-mpt-7b-instruct-w4-g128-awq  # pylint: disable=line-too-long
+            "quantize_config.json",  # E.g., abhinavkulkarni/mosaicml-mpt-7b-instruct-w4-g128-awq
         ]

     @classmethod
...

vllm/model_executor/model_loader.py
@@ -7,7 +7,7 @@ import torch.nn as nn
 from transformers import PretrainedConfig

 from vllm.config import ModelConfig
-from vllm.model_executor.models import *  # pylint: disable=wildcard-import
+from vllm.model_executor.models import *
 from vllm.model_executor.weight_utils import (get_quant_config,
                                               initialize_dummy_weights)
...

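Note that this hunk only deletes the pylint directive; the wildcard import itself is kept and no replacement suppression is added. For reference, if a suppression were still wanted under ruff, it would use a `# noqa` tag with the pyflakes code for wildcard imports. This is a hypothetical alternative, not something the commit does:

    # Hypothetical, for illustration only: silence ruff's F403
    # ("`from module import *` used") on the offending line.
    from vllm.model_executor.models import *  # noqa: F403
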
vllm/model_executor/models/aquila.py
@@ -261,10 +261,7 @@ class AquilaModel(nn.Module):
     ) -> torch.Tensor:
         hidden_states = self.embed_tokens(input_ids)
         for i in range(len(self.layers)):
-            if cache_events is None:
-                cache_event = None
-            else:
-                cache_event = cache_events[i]
+            cache_event = None if cache_events is None else cache_events[i]
             layer = self.layers[i]
             hidden_states = layer(
                 positions,
...

vllm/model_executor/models/baichuan.py
@@ -281,10 +281,7 @@ class BaiChuanModel(nn.Module):
         hidden_states = self.embed_tokens(input_ids)
         residual = None
         for i in range(len(self.layers)):
-            if cache_events is None:
-                cache_event = None
-            else:
-                cache_event = cache_events[i]
+            cache_event = None if cache_events is None else cache_events[i]
             layer = self.layers[i]
             hidden_states, residual = layer(
                 positions,
...

vllm/model_executor/models/bloom.py
@@ -256,10 +256,7 @@ class BloomModel(nn.Module):
         hidden_states = self.word_embeddings(input_ids)
         hidden_states = self.word_embeddings_layernorm(hidden_states)
         for i in range(len(self.h)):
-            if cache_events is None:
-                cache_event = None
-            else:
-                cache_event = cache_events[i]
+            cache_event = None if cache_events is None else cache_events[i]
             layer = self.h[i]
             hidden_states = layer(
                 position_ids,
...

vllm/model_executor/models/chatglm.py
@@ -269,10 +269,7 @@ class GLMTransformer(nn.Module):
         cache_events: Optional[List[torch.cuda.Event]],
     ) -> torch.Tensor:
         for i in range(self.num_layers):
-            if cache_events is None:
-                cache_event = None
-            else:
-                cache_event = cache_events[i]
+            cache_event = None if cache_events is None else cache_events[i]
             layer = self.layers[i]
             hidden_states = layer(
                 hidden_states=hidden_states,
...

vllm/model_executor/models/falcon.py
@@ -353,10 +353,7 @@ class FalconModel(nn.Module):
     ) -> torch.Tensor:
         hidden_states = self.word_embeddings(input_ids)
         for i in range(len(self.h)):
-            if cache_events is None:
-                cache_event = None
-            else:
-                cache_event = cache_events[i]
+            cache_event = None if cache_events is None else cache_events[i]
             layer = self.h[i]
             hidden_states = layer(
                 positions,
...

vllm/model_executor/models/gpt2.py
@@ -206,10 +206,7 @@ class GPT2Model(nn.Module):
         hidden_states = inputs_embeds + position_embeds
         for i in range(len(self.h)):
-            if cache_events is None:
-                cache_event = None
-            else:
-                cache_event = cache_events[i]
+            cache_event = None if cache_events is None else cache_events[i]
             layer = self.h[i]
             hidden_states = layer(hidden_states, kv_caches[i], input_metadata,
                                   cache_event)
...

vllm/model_executor/models/gpt_bigcode.py
@@ -225,10 +225,7 @@ class GPTBigCodeModel(nn.Module):
         hidden_states = inputs_embeds + position_embeds
         for i in range(len(self.h)):
-            if cache_events is None:
-                cache_event = None
-            else:
-                cache_event = cache_events[i]
+            cache_event = None if cache_events is None else cache_events[i]
             layer = self.h[i]
             hidden_states = layer(hidden_states, kv_caches[i], input_metadata,
                                   cache_event)
...

vllm/model_executor/models/gpt_j.py
@@ -147,10 +147,7 @@ class GPTJBlock(nn.Module):
         linear_method: Optional[LinearMethodBase] = None,
     ):
         super().__init__()
-        if config.n_inner is None:
-            inner_dim = 4 * config.n_embd
-        else:
-            inner_dim = config.n_inner
+        inner_dim = 4 * config.n_embd if config.n_inner is None else config.n_inner
         self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
         self.attn = GPTJAttention(config, linear_method)
         self.mlp = GPTJMLP(inner_dim, config, linear_method)
...
@@ -205,10 +202,7 @@ class GPTJModel(nn.Module):
     ) -> torch.Tensor:
         hidden_states = self.wte(input_ids)
         for i in range(len(self.h)):
-            if cache_events is None:
-                cache_event = None
-            else:
-                cache_event = cache_events[i]
+            cache_event = None if cache_events is None else cache_events[i]
             layer = self.h[i]
             hidden_states = layer(
                 position_ids,
...

vllm/model_executor/models/gpt_neox.py
@@ -216,10 +216,7 @@ class GPTNeoXModel(nn.Module):
     ) -> torch.Tensor:
         hidden_states = self.embed_in(input_ids)
         for i in range(len(self.layers)):
-            if cache_events is None:
-                cache_event = None
-            else:
-                cache_event = cache_events[i]
+            cache_event = None if cache_events is None else cache_events[i]
             layer = self.layers[i]
             hidden_states = layer(
                 position_ids,
...

vllm/model_executor/models/internlm.py
@@ -213,10 +213,7 @@ class InternLMModel(nn.Module):
         hidden_states = self.embed_tokens(input_ids)
         residual = None
         for i in range(len(self.layers)):
-            if cache_events is None:
-                cache_event = None
-            else:
-                cache_event = cache_events[i]
+            cache_event = None if cache_events is None else cache_events[i]
             layer = self.layers[i]
             hidden_states, residual = layer(
                 positions,
...

vllm/model_executor/models/llama.py
@@ -253,10 +253,7 @@ class LlamaModel(nn.Module):
         hidden_states = self.embed_tokens(input_ids)
         residual = None
         for i in range(len(self.layers)):
-            if cache_events is None:
-                cache_event = None
-            else:
-                cache_event = cache_events[i]
+            cache_event = None if cache_events is None else cache_events[i]
             layer = self.layers[i]
             hidden_states, residual = layer(
                 positions,
...

vllm/model_executor/models/mistral.py
@@ -248,10 +248,7 @@ class MistralModel(nn.Module):
         hidden_states = self.embed_tokens(input_ids)
         residual = None
         for i in range(len(self.layers)):
-            if cache_events is None:
-                cache_event = None
-            else:
-                cache_event = cache_events[i]
+            cache_event = None if cache_events is None else cache_events[i]
             layer = self.layers[i]
             hidden_states, residual = layer(
                 positions,
...

vllm/model_executor/models/mpt.py
@@ -203,10 +203,10 @@ class MPTModel(nn.Module):
         self.norm_f = nn.LayerNorm(config.d_model)
         if config.no_bias:
             for module in self.modules():
-                if hasattr(module, "bias"):
-                    if isinstance(module.bias, nn.Parameter):
-                        # Remove the bias term in Linear and LayerNorm.
-                        module.register_parameter("bias", None)
+                if hasattr(module, "bias") and isinstance(
+                        module.bias, nn.Parameter):
+                    # Remove the bias term in Linear and LayerNorm.
+                    module.register_parameter("bias", None)

     def forward(
         self,
...
@@ -218,10 +218,7 @@ class MPTModel(nn.Module):
     ) -> torch.Tensor:
         hidden_states = self.wte(input_ids)
         for i in range(len(self.blocks)):
-            if cache_events is None:
-                cache_event = None
-            else:
-                cache_event = cache_events[i]
+            cache_event = None if cache_events is None else cache_events[i]
             block = self.blocks[i]
             hidden_states = block(
                 position_ids,
...

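The first mpt.py hunk is the one place on this page where the cleanup flattens nested `if` statements instead of introducing a conditional expression. A standalone sketch of the same transformation; `strip_bias` is a hypothetical helper name, while the real code runs this loop inside `MPTModel.__init__` when `config.no_bias` is set:

    import torch.nn as nn

    def strip_bias(model: nn.Module) -> None:
        for module in model.modules():
            # One combined condition replaces the two nested if statements.
            if hasattr(module, "bias") and isinstance(module.bias, nn.Parameter):
                # Remove the bias term in Linear and LayerNorm.
                module.register_parameter("bias", None)
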
vllm/model_executor/models/opt.py
@@ -257,10 +257,7 @@ class OPTDecoder(nn.Module):
         hidden_states = inputs_embeds + pos_embeds
         for i in range(len(self.layers)):
-            if cache_events is None:
-                cache_event = None
-            else:
-                cache_event = cache_events[i]
+            cache_event = None if cache_events is None else cache_events[i]
             layer = self.layers[i]
             hidden_states = layer(hidden_states, kv_caches[i], input_metadata,
                                   cache_event)
...

vllm/model_executor/models/phi_1_5.py
@@ -258,10 +258,7 @@ class PhiModel(nn.Module):
     ) -> SamplerOutput:
         hidden_states = self.embd(input_ids)
         for i in range(self.config.num_hidden_layers):
-            if cache_events is None:
-                cache_event = None
-            else:
-                cache_event = cache_events[i]
+            cache_event = None if cache_events is None else cache_events[i]
             layer = self.h[i]
             hidden_states = layer(
                 positions,
...

vllm/model_executor/models/qwen.py
@@ -213,10 +213,7 @@ class QWenModel(nn.Module):
         hidden_states = self.wte(input_ids)
         residual = None
         for i in range(len(self.h)):
-            if cache_events is None:
-                cache_event = None
-            else:
-                cache_event = cache_events[i]
+            cache_event = None if cache_events is None else cache_events[i]
             layer = self.h[i]
             hidden_states, residual = layer(
                 positions,
...

vllm/model_executor/models/yi.py
@@ -249,10 +249,7 @@ class YiModel(nn.Module):
         hidden_states = self.embed_tokens(input_ids)
         residual = None
         for i in range(len(self.layers)):
-            if cache_events is None:
-                cache_event = None
-            else:
-                cache_event = cache_events[i]
+            cache_event = None if cache_events is None else cache_events[i]
             layer = self.layers[i]
             hidden_states, residual = layer(
                 positions,
...