chenpangpang / transformers / Commits / f2ecb9ee

Unverified commit f2ecb9ee, authored Oct 17, 2022 by Sylvain Gugger, committed via GitHub on Oct 17, 2022.

Revert "add return_tensor parameter for feature extraction (#19257)" (#19680)

This reverts commit 35bd089a.

Parent: bf0addc5
Changes: 2 changed files, with 3 additions and 29 deletions.

  src/transformers/pipelines/feature_extraction.py        +3  -11
  tests/pipelines/test_pipelines_feature_extraction.py    +0  -18
src/transformers/pipelines/feature_extraction.py @ f2ecb9ee

@@ -31,8 +31,6 @@ class FeatureExtractionPipeline(Pipeline):
             If no framework is specified, will default to the one currently installed. If no framework is specified and
             both frameworks are installed, will default to the framework of the `model`, or to PyTorch if no model is
             provided.
-        return_tensor (`bool`, *optional*):
-            If `True`, returns a tensor according to the specified framework, otherwise returns a list.
         task (`str`, defaults to `""`):
             A task-identifier for the pipeline.
         args_parser ([`~pipelines.ArgumentHandler`], *optional*):
@@ -42,7 +40,7 @@ class FeatureExtractionPipeline(Pipeline):
             the associated CUDA device id.
     """

-    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
+    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, **kwargs):
         if tokenize_kwargs is None:
             tokenize_kwargs = {}
@@ -55,11 +53,7 @@ class FeatureExtractionPipeline(Pipeline):
         preprocess_params = tokenize_kwargs
-
-        postprocess_params = {}
-        if return_tensors is not None:
-            postprocess_params["return_tensors"] = return_tensors
-
-        return preprocess_params, {}, postprocess_params
+        return preprocess_params, {}, {}

     def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
         return_tensors = self.framework
@@ -70,10 +64,8 @@ class FeatureExtractionPipeline(Pipeline):
         model_outputs = self.model(**model_inputs)
         return model_outputs

-    def postprocess(self, model_outputs, return_tensors=False):
+    def postprocess(self, model_outputs):
         # [0] is the first available tensor, logits or last_hidden_state.
-        if return_tensors:
-            return model_outputs[0]
         if self.framework == "pt":
             return model_outputs[0].tolist()
         elif self.framework == "tf":
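For context, with this revert applied `postprocess` always converts the first model output with `.tolist()`, so callers of the feature-extraction pipeline receive nested Python lists rather than framework tensors. A minimal usage sketch under that assumption; the tiny checkpoint below is the same one the removed tests used and is chosen only for illustration:

    import torch
    from transformers import pipeline

    # Tiny checkpoint, used purely for illustration.
    feature_extractor = pipeline(
        task="feature-extraction",
        model="hf-internal-testing/tiny-random-distilbert",
        framework="pt",
    )

    # After the revert, the result is a nested list of floats
    # (roughly [1, sequence_length, hidden_size] for a single string),
    # not a torch.Tensor.
    outputs = feature_extractor("This is a test")
    print(type(outputs))  # <class 'list'>

    # Callers who relied on return_tensors=True can rebuild a tensor themselves.
    tensor = torch.tensor(outputs)
    print(tensor.shape)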
tests/pipelines/test_pipelines_feature_extraction.py @ f2ecb9ee

@@ -15,8 +15,6 @@
 import unittest

 import numpy as np
-import tensorflow as tf
-import torch

 from transformers import (
     FEATURE_EXTRACTOR_MAPPING,
@@ -135,22 +133,6 @@ class FeatureExtractionPipelineTests(unittest.TestCase, metaclass=PipelineTestCa
             tokenize_kwargs=tokenize_kwargs,
         )

-    @require_torch
-    def test_return_tensors_pt(self):
-        feature_extractor = pipeline(
-            task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
-        )
-        outputs = feature_extractor("This is a test" * 100, return_tensors=True)
-        self.assertTrue(torch.is_tensor(outputs))
-
-    @require_tf
-    def test_return_tensors_tf(self):
-        feature_extractor = pipeline(
-            task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
-        )
-        outputs = feature_extractor("This is a test" * 100, return_tensors=True)
-        self.assertTrue(tf.is_tensor(outputs))
-
     def get_shape(self, input_, shape=None):
         if shape is None:
             shape = []
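The deleted methods were the only coverage for `return_tensors=True`, so after the revert an equivalent check would assert the list output instead. A hypothetical sketch of such a test, not part of this commit:

    import unittest

    from transformers import pipeline
    from transformers.testing_utils import require_torch


    class FeatureExtractionListOutputTest(unittest.TestCase):
        @require_torch
        def test_returns_nested_list(self):
            # Same tiny checkpoint as the removed tests.
            feature_extractor = pipeline(
                task="feature-extraction",
                model="hf-internal-testing/tiny-random-distilbert",
                framework="pt",
            )
            outputs = feature_extractor("This is a test")
            # After the revert, postprocess always calls .tolist(), so the
            # result is a plain Python list rather than a torch.Tensor.
            self.assertIsInstance(outputs, list)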