Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
chenpangpang
transformers
Commits
a6d8a149
Unverified
Commit
a6d8a149
authored
Feb 02, 2023
by
Yih-Dar
Committed by
GitHub
Feb 02, 2023
Browse files
Fix some pipeline tests (#21401)
* fix Co-authored-by:
ydshieh
<
ydshieh@users.noreply.github.com
>
parent
145bf41c
Changes
20
Hide whitespace changes
Inline
Side-by-side
Showing
20 changed files
with
69 additions
and
37 deletions
+69
-37
src/transformers/pipelines/__init__.py
src/transformers/pipelines/__init__.py
+24
-1
src/transformers/pipelines/base.py
src/transformers/pipelines/base.py
+8
-1
src/transformers/pipelines/depth_estimation.py
src/transformers/pipelines/depth_estimation.py
+1
-1
src/transformers/pipelines/document_question_answering.py
src/transformers/pipelines/document_question_answering.py
+6
-2
src/transformers/pipelines/image_classification.py
src/transformers/pipelines/image_classification.py
+1
-1
src/transformers/pipelines/image_segmentation.py
src/transformers/pipelines/image_segmentation.py
+0
-6
src/transformers/pipelines/image_to_text.py
src/transformers/pipelines/image_to_text.py
+1
-1
src/transformers/pipelines/object_detection.py
src/transformers/pipelines/object_detection.py
+2
-4
src/transformers/pipelines/video_classification.py
src/transformers/pipelines/video_classification.py
+1
-1
src/transformers/pipelines/visual_question_answering.py
src/transformers/pipelines/visual_question_answering.py
+1
-1
src/transformers/pipelines/zero_shot_image_classification.py
src/transformers/pipelines/zero_shot_image_classification.py
+1
-1
src/transformers/pipelines/zero_shot_object_detection.py
src/transformers/pipelines/zero_shot_object_detection.py
+2
-2
tests/pipelines/test_pipelines_common.py
tests/pipelines/test_pipelines_common.py
+12
-0
tests/pipelines/test_pipelines_depth_estimation.py
tests/pipelines/test_pipelines_depth_estimation.py
+1
-1
tests/pipelines/test_pipelines_document_question_answering.py
...s/pipelines/test_pipelines_document_question_answering.py
+2
-7
tests/pipelines/test_pipelines_image_classification.py
tests/pipelines/test_pipelines_image_classification.py
+1
-1
tests/pipelines/test_pipelines_image_segmentation.py
tests/pipelines/test_pipelines_image_segmentation.py
+2
-3
tests/pipelines/test_pipelines_image_to_text.py
tests/pipelines/test_pipelines_image_to_text.py
+1
-1
tests/pipelines/test_pipelines_object_detection.py
tests/pipelines/test_pipelines_object_detection.py
+1
-1
tests/pipelines/test_pipelines_video_classification.py
tests/pipelines/test_pipelines_video_classification.py
+1
-1
No files found.
src/transformers/pipelines/__init__.py
View file @
a6d8a149
...
@@ -387,8 +387,11 @@ for task, values in SUPPORTED_TASKS.items():
...
@@ -387,8 +387,11 @@ for task, values in SUPPORTED_TASKS.items():
if
values
[
"type"
]
==
"text"
:
if
values
[
"type"
]
==
"text"
:
NO_FEATURE_EXTRACTOR_TASKS
.
add
(
task
)
NO_FEATURE_EXTRACTOR_TASKS
.
add
(
task
)
NO_IMAGE_PROCESSOR_TASKS
.
add
(
task
)
NO_IMAGE_PROCESSOR_TASKS
.
add
(
task
)
elif
values
[
"type"
]
in
{
"audio"
,
"image"
,
"video"
}:
elif
values
[
"type"
]
in
{
"image"
,
"video"
}:
NO_TOKENIZER_TASKS
.
add
(
task
)
NO_TOKENIZER_TASKS
.
add
(
task
)
elif
values
[
"type"
]
in
{
"audio"
}:
NO_TOKENIZER_TASKS
.
add
(
task
)
NO_IMAGE_PROCESSOR_TASKS
.
add
(
task
)
elif
values
[
"type"
]
!=
"multimodal"
:
elif
values
[
"type"
]
!=
"multimodal"
:
raise
ValueError
(
f
"SUPPORTED_TASK
{
task
}
contains invalid type
{
values
[
'type'
]
}
"
)
raise
ValueError
(
f
"SUPPORTED_TASK
{
task
}
contains invalid type
{
values
[
'type'
]
}
"
)
...
@@ -773,6 +776,14 @@ def pipeline(
...
@@ -773,6 +776,14 @@ def pipeline(
load_feature_extractor
=
type
(
model_config
)
in
FEATURE_EXTRACTOR_MAPPING
or
feature_extractor
is
not
None
load_feature_extractor
=
type
(
model_config
)
in
FEATURE_EXTRACTOR_MAPPING
or
feature_extractor
is
not
None
load_image_processor
=
type
(
model_config
)
in
IMAGE_PROCESSOR_MAPPING
or
image_processor
is
not
None
load_image_processor
=
type
(
model_config
)
in
IMAGE_PROCESSOR_MAPPING
or
image_processor
is
not
None
# If `model` (instance of `PretrainedModel` instead of `str`) is passed (and/or same for config), while
# `image_processor` or `feature_extractor` is `None`, the loading will fail. This happens particularly for some
# vision tasks when calling `pipeline()` with `model` and only one of the `image_processor` and `feature_extractor`.
# TODO: we need to make `NO_IMAGE_PROCESSOR_TASKS` and `NO_FEATURE_EXTRACTOR_TASKS` more robust to avoid such issue.
# This block is only temporarily to make CI green.
if
load_image_processor
and
load_feature_extractor
:
load_feature_extractor
=
False
if
(
if
(
tokenizer
is
None
tokenizer
is
None
and
not
load_tokenizer
and
not
load_tokenizer
...
@@ -784,6 +795,18 @@ def pipeline(
...
@@ -784,6 +795,18 @@ def pipeline(
# so the model_config might not define a tokenizer, but it seems to be
# so the model_config might not define a tokenizer, but it seems to be
# necessary for the task, so we're force-trying to load it.
# necessary for the task, so we're force-trying to load it.
load_tokenizer
=
True
load_tokenizer
=
True
if
(
image_processor
is
None
and
not
load_image_processor
and
normalized_task
not
in
NO_IMAGE_PROCESSOR_TASKS
# Using class name to avoid importing the real class.
and
model_config
.
__class__
.
__name__
in
MULTI_MODEL_CONFIGS
and
normalized_task
!=
"automatic-speech-recognition"
):
# This is a special category of models, that are fusions of multiple models
# so the model_config might not define a tokenizer, but it seems to be
# necessary for the task, so we're force-trying to load it.
load_image_processor
=
True
if
(
if
(
feature_extractor
is
None
feature_extractor
is
None
and
not
load_feature_extractor
and
not
load_feature_extractor
...
...
src/transformers/pipelines/base.py
View file @
a6d8a149
...
@@ -77,7 +77,7 @@ def _pad(items, key, padding_value, padding_side):
...
@@ -77,7 +77,7 @@ def _pad(items, key, padding_value, padding_side):
# Others include `attention_mask` etc...
# Others include `attention_mask` etc...
shape
=
items
[
0
][
key
].
shape
shape
=
items
[
0
][
key
].
shape
dim
=
len
(
shape
)
dim
=
len
(
shape
)
if
key
==
"pixel_values"
:
if
key
in
[
"pixel_values"
,
"image"
]
:
# This is probable image so padding shouldn't be necessary
# This is probable image so padding shouldn't be necessary
# B, C, H, W
# B, C, H, W
return
torch
.
cat
([
item
[
key
]
for
item
in
items
],
dim
=
0
)
return
torch
.
cat
([
item
[
key
]
for
item
in
items
],
dim
=
0
)
...
@@ -792,6 +792,13 @@ class Pipeline(_ScikitCompat):
...
@@ -792,6 +792,13 @@ class Pipeline(_ScikitCompat):
self
.
_num_workers
=
kwargs
.
pop
(
"num_workers"
,
None
)
self
.
_num_workers
=
kwargs
.
pop
(
"num_workers"
,
None
)
self
.
_preprocess_params
,
self
.
_forward_params
,
self
.
_postprocess_params
=
self
.
_sanitize_parameters
(
**
kwargs
)
self
.
_preprocess_params
,
self
.
_forward_params
,
self
.
_postprocess_params
=
self
.
_sanitize_parameters
(
**
kwargs
)
if
self
.
image_processor
is
None
and
self
.
feature_extractor
is
not
None
:
if
isinstance
(
self
.
feature_extractor
,
BaseImageProcessor
):
# Backward compatible change, if users called
# ImageSegmentationPipeline(.., feature_extractor=MyFeatureExtractor())
# then we should keep working
self
.
image_processor
=
self
.
feature_extractor
def
save_pretrained
(
self
,
save_directory
:
str
):
def
save_pretrained
(
self
,
save_directory
:
str
):
"""
"""
Save the pipeline's model and tokenizer.
Save the pipeline's model and tokenizer.
...
...
src/transformers/pipelines/depth_estimation.py
View file @
a6d8a149
...
@@ -87,7 +87,7 @@ class DepthEstimationPipeline(Pipeline):
...
@@ -87,7 +87,7 @@ class DepthEstimationPipeline(Pipeline):
def
preprocess
(
self
,
image
):
def
preprocess
(
self
,
image
):
image
=
load_image
(
image
)
image
=
load_image
(
image
)
self
.
image_size
=
image
.
size
self
.
image_size
=
image
.
size
model_inputs
=
self
.
feature_extract
or
(
images
=
image
,
return_tensors
=
self
.
framework
)
model_inputs
=
self
.
image_process
or
(
images
=
image
,
return_tensors
=
self
.
framework
)
return
model_inputs
return
model_inputs
def
_forward
(
self
,
model_inputs
):
def
_forward
(
self
,
model_inputs
):
...
...
src/transformers/pipelines/document_question_answering.py
View file @
a6d8a149
...
@@ -281,7 +281,9 @@ class DocumentQuestionAnsweringPipeline(ChunkPipeline):
...
@@ -281,7 +281,9 @@ class DocumentQuestionAnsweringPipeline(ChunkPipeline):
image_features
=
{}
image_features
=
{}
if
input
.
get
(
"image"
,
None
)
is
not
None
:
if
input
.
get
(
"image"
,
None
)
is
not
None
:
image
=
load_image
(
input
[
"image"
])
image
=
load_image
(
input
[
"image"
])
if
self
.
feature_extractor
is
not
None
:
if
self
.
image_processor
is
not
None
:
image_features
.
update
(
self
.
image_processor
(
images
=
image
,
return_tensors
=
self
.
framework
))
elif
self
.
feature_extractor
is
not
None
:
image_features
.
update
(
self
.
feature_extractor
(
images
=
image
,
return_tensors
=
self
.
framework
))
image_features
.
update
(
self
.
feature_extractor
(
images
=
image
,
return_tensors
=
self
.
framework
))
elif
self
.
model_type
==
ModelType
.
VisionEncoderDecoder
:
elif
self
.
model_type
==
ModelType
.
VisionEncoderDecoder
:
raise
ValueError
(
"If you are using a VisionEncoderDecoderModel, you must provide a feature extractor"
)
raise
ValueError
(
"If you are using a VisionEncoderDecoderModel, you must provide a feature extractor"
)
...
@@ -352,7 +354,9 @@ class DocumentQuestionAnsweringPipeline(ChunkPipeline):
...
@@ -352,7 +354,9 @@ class DocumentQuestionAnsweringPipeline(ChunkPipeline):
return_overflowing_tokens
=
True
,
return_overflowing_tokens
=
True
,
**
tokenizer_kwargs
,
**
tokenizer_kwargs
,
)
)
encoding
.
pop
(
"overflow_to_sample_mapping"
)
# We do not use this
# TODO: check why slower `LayoutLMTokenizer` and `LayoutLMv2Tokenizer` don't have this key in outputs
# FIXME: ydshieh and/or Narsil
encoding
.
pop
(
"overflow_to_sample_mapping"
,
None
)
# We do not use this
num_spans
=
len
(
encoding
[
"input_ids"
])
num_spans
=
len
(
encoding
[
"input_ids"
])
...
...
src/transformers/pipelines/image_classification.py
View file @
a6d8a149
...
@@ -101,7 +101,7 @@ class ImageClassificationPipeline(Pipeline):
...
@@ -101,7 +101,7 @@ class ImageClassificationPipeline(Pipeline):
def
preprocess
(
self
,
image
):
def
preprocess
(
self
,
image
):
image
=
load_image
(
image
)
image
=
load_image
(
image
)
model_inputs
=
self
.
feature_extract
or
(
images
=
image
,
return_tensors
=
self
.
framework
)
model_inputs
=
self
.
image_process
or
(
images
=
image
,
return_tensors
=
self
.
framework
)
return
model_inputs
return
model_inputs
def
_forward
(
self
,
model_inputs
):
def
_forward
(
self
,
model_inputs
):
...
...
src/transformers/pipelines/image_segmentation.py
View file @
a6d8a149
...
@@ -67,12 +67,6 @@ class ImageSegmentationPipeline(Pipeline):
...
@@ -67,12 +67,6 @@ class ImageSegmentationPipeline(Pipeline):
def
__init__
(
self
,
*
args
,
**
kwargs
):
def
__init__
(
self
,
*
args
,
**
kwargs
):
super
().
__init__
(
*
args
,
**
kwargs
)
super
().
__init__
(
*
args
,
**
kwargs
)
if
self
.
image_processor
is
None
and
self
.
feature_extractor
is
not
None
:
# Backward compatible change, if users called
# ImageSegmentationPipeline(.., feature_extractor=MyFeatureExtractor())
# then we should keep working
self
.
image_processor
=
self
.
feature_extractor
if
self
.
framework
==
"tf"
:
if
self
.
framework
==
"tf"
:
raise
ValueError
(
f
"The
{
self
.
__class__
}
is only available in PyTorch."
)
raise
ValueError
(
f
"The
{
self
.
__class__
}
is only available in PyTorch."
)
...
...
src/transformers/pipelines/image_to_text.py
View file @
a6d8a149
...
@@ -100,7 +100,7 @@ class ImageToTextPipeline(Pipeline):
...
@@ -100,7 +100,7 @@ class ImageToTextPipeline(Pipeline):
def
preprocess
(
self
,
image
):
def
preprocess
(
self
,
image
):
image
=
load_image
(
image
)
image
=
load_image
(
image
)
model_inputs
=
self
.
feature_extract
or
(
images
=
image
,
return_tensors
=
self
.
framework
)
model_inputs
=
self
.
image_process
or
(
images
=
image
,
return_tensors
=
self
.
framework
)
return
model_inputs
return
model_inputs
def
_forward
(
self
,
model_inputs
,
generate_kwargs
=
None
):
def
_forward
(
self
,
model_inputs
,
generate_kwargs
=
None
):
...
...
src/transformers/pipelines/object_detection.py
View file @
a6d8a149
...
@@ -97,7 +97,7 @@ class ObjectDetectionPipeline(Pipeline):
...
@@ -97,7 +97,7 @@ class ObjectDetectionPipeline(Pipeline):
def
preprocess
(
self
,
image
):
def
preprocess
(
self
,
image
):
image
=
load_image
(
image
)
image
=
load_image
(
image
)
target_size
=
torch
.
IntTensor
([[
image
.
height
,
image
.
width
]])
target_size
=
torch
.
IntTensor
([[
image
.
height
,
image
.
width
]])
inputs
=
self
.
feature_extract
or
(
images
=
[
image
],
return_tensors
=
"pt"
)
inputs
=
self
.
image_process
or
(
images
=
[
image
],
return_tensors
=
"pt"
)
if
self
.
tokenizer
is
not
None
:
if
self
.
tokenizer
is
not
None
:
inputs
=
self
.
tokenizer
(
text
=
inputs
[
"words"
],
boxes
=
inputs
[
"boxes"
],
return_tensors
=
"pt"
)
inputs
=
self
.
tokenizer
(
text
=
inputs
[
"words"
],
boxes
=
inputs
[
"boxes"
],
return_tensors
=
"pt"
)
inputs
[
"target_size"
]
=
target_size
inputs
[
"target_size"
]
=
target_size
...
@@ -137,9 +137,7 @@ class ObjectDetectionPipeline(Pipeline):
...
@@ -137,9 +137,7 @@ class ObjectDetectionPipeline(Pipeline):
annotation
=
[
dict
(
zip
(
keys
,
vals
))
for
vals
in
zip
(
scores
.
tolist
(),
labels
,
boxes
)
if
vals
[
0
]
>
threshold
]
annotation
=
[
dict
(
zip
(
keys
,
vals
))
for
vals
in
zip
(
scores
.
tolist
(),
labels
,
boxes
)
if
vals
[
0
]
>
threshold
]
else
:
else
:
# This is a regular ForObjectDetectionModel
# This is a regular ForObjectDetectionModel
raw_annotations
=
self
.
feature_extractor
.
post_process_object_detection
(
raw_annotations
=
self
.
image_processor
.
post_process_object_detection
(
model_outputs
,
threshold
,
target_size
)
model_outputs
,
threshold
,
target_size
)
raw_annotation
=
raw_annotations
[
0
]
raw_annotation
=
raw_annotations
[
0
]
scores
=
raw_annotation
[
"scores"
]
scores
=
raw_annotation
[
"scores"
]
labels
=
raw_annotation
[
"labels"
]
labels
=
raw_annotation
[
"labels"
]
...
...
src/transformers/pipelines/video_classification.py
View file @
a6d8a149
...
@@ -102,7 +102,7 @@ class VideoClassificationPipeline(Pipeline):
...
@@ -102,7 +102,7 @@ class VideoClassificationPipeline(Pipeline):
video
=
videoreader
.
get_batch
(
indices
).
asnumpy
()
video
=
videoreader
.
get_batch
(
indices
).
asnumpy
()
video
=
list
(
video
)
video
=
list
(
video
)
model_inputs
=
self
.
feature_extract
or
(
video
,
return_tensors
=
self
.
framework
)
model_inputs
=
self
.
image_process
or
(
video
,
return_tensors
=
self
.
framework
)
return
model_inputs
return
model_inputs
def
_forward
(
self
,
model_inputs
):
def
_forward
(
self
,
model_inputs
):
...
...
src/transformers/pipelines/visual_question_answering.py
View file @
a6d8a149
...
@@ -114,7 +114,7 @@ class VisualQuestionAnsweringPipeline(Pipeline):
...
@@ -114,7 +114,7 @@ class VisualQuestionAnsweringPipeline(Pipeline):
model_inputs
=
self
.
tokenizer
(
model_inputs
=
self
.
tokenizer
(
inputs
[
"question"
],
return_tensors
=
self
.
framework
,
padding
=
padding
,
truncation
=
truncation
inputs
[
"question"
],
return_tensors
=
self
.
framework
,
padding
=
padding
,
truncation
=
truncation
)
)
image_features
=
self
.
feature_extract
or
(
images
=
image
,
return_tensors
=
self
.
framework
)
image_features
=
self
.
image_process
or
(
images
=
image
,
return_tensors
=
self
.
framework
)
model_inputs
.
update
(
image_features
)
model_inputs
.
update
(
image_features
)
return
model_inputs
return
model_inputs
...
...
src/transformers/pipelines/zero_shot_image_classification.py
View file @
a6d8a149
...
@@ -110,7 +110,7 @@ class ZeroShotImageClassificationPipeline(ChunkPipeline):
...
@@ -110,7 +110,7 @@ class ZeroShotImageClassificationPipeline(ChunkPipeline):
n
=
len
(
candidate_labels
)
n
=
len
(
candidate_labels
)
for
i
,
candidate_label
in
enumerate
(
candidate_labels
):
for
i
,
candidate_label
in
enumerate
(
candidate_labels
):
image
=
load_image
(
image
)
image
=
load_image
(
image
)
images
=
self
.
feature_extract
or
(
images
=
[
image
],
return_tensors
=
self
.
framework
)
images
=
self
.
image_process
or
(
images
=
[
image
],
return_tensors
=
self
.
framework
)
sequence
=
hypothesis_template
.
format
(
candidate_label
)
sequence
=
hypothesis_template
.
format
(
candidate_label
)
inputs
=
self
.
tokenizer
(
sequence
,
return_tensors
=
self
.
framework
)
inputs
=
self
.
tokenizer
(
sequence
,
return_tensors
=
self
.
framework
)
inputs
[
"pixel_values"
]
=
images
.
pixel_values
inputs
[
"pixel_values"
]
=
images
.
pixel_values
...
...
src/transformers/pipelines/zero_shot_object_detection.py
View file @
a6d8a149
...
@@ -148,7 +148,7 @@ class ZeroShotObjectDetectionPipeline(ChunkPipeline):
...
@@ -148,7 +148,7 @@ class ZeroShotObjectDetectionPipeline(ChunkPipeline):
target_size
=
torch
.
tensor
([[
image
.
height
,
image
.
width
]],
dtype
=
torch
.
int32
)
target_size
=
torch
.
tensor
([[
image
.
height
,
image
.
width
]],
dtype
=
torch
.
int32
)
for
i
,
candidate_label
in
enumerate
(
candidate_labels
):
for
i
,
candidate_label
in
enumerate
(
candidate_labels
):
text_inputs
=
self
.
tokenizer
(
candidate_label
,
return_tensors
=
self
.
framework
)
text_inputs
=
self
.
tokenizer
(
candidate_label
,
return_tensors
=
self
.
framework
)
image_features
=
self
.
feature_extract
or
(
image
,
return_tensors
=
self
.
framework
)
image_features
=
self
.
image_process
or
(
image
,
return_tensors
=
self
.
framework
)
yield
{
yield
{
"is_last"
:
i
==
len
(
candidate_labels
)
-
1
,
"is_last"
:
i
==
len
(
candidate_labels
)
-
1
,
"target_size"
:
target_size
,
"target_size"
:
target_size
,
...
@@ -173,7 +173,7 @@ class ZeroShotObjectDetectionPipeline(ChunkPipeline):
...
@@ -173,7 +173,7 @@ class ZeroShotObjectDetectionPipeline(ChunkPipeline):
for
model_output
in
model_outputs
:
for
model_output
in
model_outputs
:
label
=
model_output
[
"candidate_label"
]
label
=
model_output
[
"candidate_label"
]
model_output
=
BaseModelOutput
(
model_output
)
model_output
=
BaseModelOutput
(
model_output
)
outputs
=
self
.
feature_extract
or
.
post_process_object_detection
(
outputs
=
self
.
image_process
or
.
post_process_object_detection
(
outputs
=
model_output
,
threshold
=
threshold
,
target_sizes
=
model_output
[
"target_size"
]
outputs
=
model_output
,
threshold
=
threshold
,
target_sizes
=
model_output
[
"target_size"
]
)[
0
]
)[
0
]
...
...
tests/pipelines/test_pipelines_common.py
View file @
a6d8a149
...
@@ -179,6 +179,18 @@ def is_test_to_skip(test_casse_name, config_class, model_architecture, tokenizer
...
@@ -179,6 +179,18 @@ def is_test_to_skip(test_casse_name, config_class, model_architecture, tokenizer
# fails this test case. Skip for now - a fix for this along with the initial changes in PR #20426 is
# fails this test case. Skip for now - a fix for this along with the initial changes in PR #20426 is
# too much. Let `ydshieh` to fix it ASAP once #20426 is merged.
# too much. Let `ydshieh` to fix it ASAP once #20426 is merged.
to_skip
=
True
to_skip
=
True
elif
config_class
.
__name__
==
"LayoutLMv2Config"
and
test_casse_name
in
[
"QAPipelineTests"
,
"TextClassificationPipelineTests"
,
"TokenClassificationPipelineTests"
,
"ZeroShotClassificationPipelineTests"
,
]:
# `LayoutLMv2Config` was never used in pipeline tests (`test_pt_LayoutLMv2Config_XXX`) due to lack of tiny
# config. With new tiny model creation, it is available, but we need to fix the failed tests.
to_skip
=
True
elif
test_casse_name
==
"DocumentQuestionAnsweringPipelineTests"
and
not
tokenizer_name
.
endswith
(
"Fast"
):
# This pipeline uses `sequence_ids()` which is only available for fast tokenizers.
to_skip
=
True
return
to_skip
return
to_skip
...
...
tests/pipelines/test_pipelines_depth_estimation.py
View file @
a6d8a149
...
@@ -48,7 +48,7 @@ class DepthEstimationPipelineTests(unittest.TestCase, metaclass=PipelineTestCase
...
@@ -48,7 +48,7 @@ class DepthEstimationPipelineTests(unittest.TestCase, metaclass=PipelineTestCase
model_mapping
=
MODEL_FOR_DEPTH_ESTIMATION_MAPPING
model_mapping
=
MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def
get_test_pipeline
(
self
,
model
,
tokenizer
,
processor
):
def
get_test_pipeline
(
self
,
model
,
tokenizer
,
processor
):
depth_estimator
=
DepthEstimationPipeline
(
model
=
model
,
feature_extract
or
=
processor
)
depth_estimator
=
DepthEstimationPipeline
(
model
=
model
,
image_process
or
=
processor
)
return
depth_estimator
,
[
return
depth_estimator
,
[
"./tests/fixtures/tests_samples/COCO/000000039769.png"
,
"./tests/fixtures/tests_samples/COCO/000000039769.png"
,
"./tests/fixtures/tests_samples/COCO/000000039769.png"
,
"./tests/fixtures/tests_samples/COCO/000000039769.png"
,
...
...
tests/pipelines/test_pipelines_document_question_answering.py
View file @
a6d8a149
...
@@ -61,7 +61,7 @@ class DocumentQuestionAnsweringPipelineTests(unittest.TestCase, metaclass=Pipeli
...
@@ -61,7 +61,7 @@ class DocumentQuestionAnsweringPipelineTests(unittest.TestCase, metaclass=Pipeli
@
require_vision
@
require_vision
def
get_test_pipeline
(
self
,
model
,
tokenizer
,
processor
):
def
get_test_pipeline
(
self
,
model
,
tokenizer
,
processor
):
dqa_pipeline
=
pipeline
(
dqa_pipeline
=
pipeline
(
"document-question-answering"
,
model
=
model
,
tokenizer
=
tokenizer
,
feature_extract
or
=
processor
"document-question-answering"
,
model
=
model
,
tokenizer
=
tokenizer
,
image_process
or
=
processor
)
)
image
=
INVOICE_URL
image
=
INVOICE_URL
...
@@ -81,11 +81,6 @@ class DocumentQuestionAnsweringPipelineTests(unittest.TestCase, metaclass=Pipeli
...
@@ -81,11 +81,6 @@ class DocumentQuestionAnsweringPipelineTests(unittest.TestCase, metaclass=Pipeli
"question"
:
question
,
"question"
:
question
,
"word_boxes"
:
word_boxes
,
"word_boxes"
:
word_boxes
,
},
},
{
"image"
:
None
,
"question"
:
question
,
"word_boxes"
:
word_boxes
,
},
]
]
return
dqa_pipeline
,
examples
return
dqa_pipeline
,
examples
...
@@ -99,7 +94,7 @@ class DocumentQuestionAnsweringPipelineTests(unittest.TestCase, metaclass=Pipeli
...
@@ -99,7 +94,7 @@ class DocumentQuestionAnsweringPipelineTests(unittest.TestCase, metaclass=Pipeli
{
"score"
:
ANY
(
float
),
"answer"
:
ANY
(
str
),
"start"
:
ANY
(
int
),
"end"
:
ANY
(
int
)},
{
"score"
:
ANY
(
float
),
"answer"
:
ANY
(
str
),
"start"
:
ANY
(
int
),
"end"
:
ANY
(
int
)},
]
]
]
]
*
4
,
*
3
,
)
)
@
require_torch
@
require_torch
...
...
tests/pipelines/test_pipelines_image_classification.py
View file @
a6d8a149
...
@@ -50,7 +50,7 @@ class ImageClassificationPipelineTests(unittest.TestCase, metaclass=PipelineTest
...
@@ -50,7 +50,7 @@ class ImageClassificationPipelineTests(unittest.TestCase, metaclass=PipelineTest
tf_model_mapping
=
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
tf_model_mapping
=
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
def
get_test_pipeline
(
self
,
model
,
tokenizer
,
processor
):
def
get_test_pipeline
(
self
,
model
,
tokenizer
,
processor
):
image_classifier
=
ImageClassificationPipeline
(
model
=
model
,
feature_extract
or
=
processor
,
top_k
=
2
)
image_classifier
=
ImageClassificationPipeline
(
model
=
model
,
image_process
or
=
processor
,
top_k
=
2
)
examples
=
[
examples
=
[
Image
.
open
(
"./tests/fixtures/tests_samples/COCO/000000039769.png"
),
Image
.
open
(
"./tests/fixtures/tests_samples/COCO/000000039769.png"
),
"http://images.cocodataset.org/val2017/000000039769.jpg"
,
"http://images.cocodataset.org/val2017/000000039769.jpg"
,
...
...
tests/pipelines/test_pipelines_image_segmentation.py
View file @
a6d8a149
...
@@ -25,7 +25,6 @@ from transformers import (
...
@@ -25,7 +25,6 @@ from transformers import (
MODEL_FOR_IMAGE_SEGMENTATION_MAPPING
,
MODEL_FOR_IMAGE_SEGMENTATION_MAPPING
,
MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING
,
MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING
,
MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING
,
MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING
,
AutoFeatureExtractor
,
AutoImageProcessor
,
AutoImageProcessor
,
AutoModelForImageSegmentation
,
AutoModelForImageSegmentation
,
AutoModelForInstanceSegmentation
,
AutoModelForInstanceSegmentation
,
...
@@ -555,9 +554,9 @@ class ImageSegmentationPipelineTests(unittest.TestCase, metaclass=PipelineTestCa
...
@@ -555,9 +554,9 @@ class ImageSegmentationPipelineTests(unittest.TestCase, metaclass=PipelineTestCa
model_id
=
"facebook/maskformer-swin-base-ade"
model_id
=
"facebook/maskformer-swin-base-ade"
model
=
AutoModelForInstanceSegmentation
.
from_pretrained
(
model_id
)
model
=
AutoModelForInstanceSegmentation
.
from_pretrained
(
model_id
)
feature_extractor
=
AutoFeatureExtract
or
.
from_pretrained
(
model_id
)
image_processor
=
AutoImageProcess
or
.
from_pretrained
(
model_id
)
image_segmenter
=
pipeline
(
"image-segmentation"
,
model
=
model
,
feature_extractor
=
feature_extract
or
)
image_segmenter
=
pipeline
(
"image-segmentation"
,
model
=
model
,
image_processor
=
image_process
or
)
image
=
load_dataset
(
"hf-internal-testing/fixtures_ade20k"
,
split
=
"test"
)
image
=
load_dataset
(
"hf-internal-testing/fixtures_ade20k"
,
split
=
"test"
)
file
=
image
[
0
][
"file"
]
file
=
image
[
0
][
"file"
]
...
...
tests/pipelines/test_pipelines_image_to_text.py
View file @
a6d8a149
...
@@ -37,7 +37,7 @@ class ImageToTextPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta
...
@@ -37,7 +37,7 @@ class ImageToTextPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta
tf_model_mapping
=
TF_MODEL_FOR_VISION_2_SEQ_MAPPING
tf_model_mapping
=
TF_MODEL_FOR_VISION_2_SEQ_MAPPING
def
get_test_pipeline
(
self
,
model
,
tokenizer
,
processor
):
def
get_test_pipeline
(
self
,
model
,
tokenizer
,
processor
):
pipe
=
pipeline
(
"image-to-text"
,
model
=
model
,
tokenizer
=
tokenizer
,
feature_extract
or
=
processor
)
pipe
=
pipeline
(
"image-to-text"
,
model
=
model
,
tokenizer
=
tokenizer
,
image_process
or
=
processor
)
examples
=
[
examples
=
[
Image
.
open
(
"./tests/fixtures/tests_samples/COCO/000000039769.png"
),
Image
.
open
(
"./tests/fixtures/tests_samples/COCO/000000039769.png"
),
"./tests/fixtures/tests_samples/COCO/000000039769.png"
,
"./tests/fixtures/tests_samples/COCO/000000039769.png"
,
...
...
tests/pipelines/test_pipelines_object_detection.py
View file @
a6d8a149
...
@@ -52,7 +52,7 @@ class ObjectDetectionPipelineTests(unittest.TestCase, metaclass=PipelineTestCase
...
@@ -52,7 +52,7 @@ class ObjectDetectionPipelineTests(unittest.TestCase, metaclass=PipelineTestCase
model_mapping
=
MODEL_FOR_OBJECT_DETECTION_MAPPING
model_mapping
=
MODEL_FOR_OBJECT_DETECTION_MAPPING
def
get_test_pipeline
(
self
,
model
,
tokenizer
,
processor
):
def
get_test_pipeline
(
self
,
model
,
tokenizer
,
processor
):
object_detector
=
ObjectDetectionPipeline
(
model
=
model
,
feature_extract
or
=
processor
)
object_detector
=
ObjectDetectionPipeline
(
model
=
model
,
image_process
or
=
processor
)
return
object_detector
,
[
"./tests/fixtures/tests_samples/COCO/000000039769.png"
]
return
object_detector
,
[
"./tests/fixtures/tests_samples/COCO/000000039769.png"
]
def
run_pipeline_test
(
self
,
object_detector
,
examples
):
def
run_pipeline_test
(
self
,
object_detector
,
examples
):
...
...
tests/pipelines/test_pipelines_video_classification.py
View file @
a6d8a149
...
@@ -39,7 +39,7 @@ class VideoClassificationPipelineTests(unittest.TestCase, metaclass=PipelineTest
...
@@ -39,7 +39,7 @@ class VideoClassificationPipelineTests(unittest.TestCase, metaclass=PipelineTest
example_video_filepath
=
hf_hub_download
(
example_video_filepath
=
hf_hub_download
(
repo_id
=
"nateraw/video-demo"
,
filename
=
"archery.mp4"
,
repo_type
=
"dataset"
repo_id
=
"nateraw/video-demo"
,
filename
=
"archery.mp4"
,
repo_type
=
"dataset"
)
)
video_classifier
=
VideoClassificationPipeline
(
model
=
model
,
feature_extract
or
=
processor
,
top_k
=
2
)
video_classifier
=
VideoClassificationPipeline
(
model
=
model
,
image_process
or
=
processor
,
top_k
=
2
)
examples
=
[
examples
=
[
example_video_filepath
,
example_video_filepath
,
"https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4"
,
"https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4"
,
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment