OpenDAS / Paddle, commit dbe08e9b ("2.4.2")
Authored Jun 12, 2023 by yuguo960516yuguo
Parent: b5499578
Changes: 302; showing 20 changed files with 1284 additions and 1109 deletions (+1284 / -1109).
All of the changed files shown are TensorRT converter unit tests under
python/paddle/fluid/tests/unittests/ir/inference/:

    test_trt_convert_arg_max.py           +29   -26
    test_trt_convert_bmm.py               +32   -31
    test_trt_convert_clip.py              +39   -37
    test_trt_convert_concat.py            +102  -75
    test_trt_convert_conv2d_transpose.py  +122  -96
    test_trt_convert_dropout.py           +44   -32
    test_trt_convert_elementwise.py       +246  -196
    test_trt_convert_equal.py             +42   -42
    test_trt_convert_fc.py                +75   -70
    test_trt_convert_fill_constant.py     +48   -36
    test_trt_convert_flatten.py           +106  -92
    test_trt_convert_gather.py            +72   -55
    test_trt_convert_gather_nd.py         +132  -148
    test_trt_convert_gelu.py              +23   -22
    test_trt_convert_grid_sampler.py      +26   -25
    test_trt_convert_group_norm.py        +46   -35
    test_trt_convert_hard_sigmoid.py      +19   -20
    test_trt_convert_hard_swish.py        +31   -28
    test_trt_convert_inverse.py           +19   -19
    test_trt_convert_leaky_relu.py        +31   -24
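Two changes recur throughout the diffs that follow: the test sources are re-wrapped in a black-style layout (literals exploded one element per line, trailing commas added, function signatures split across lines), and the FP16 comparison tolerance yielded by sample_predictor_configs is relaxed from 1e-5 to 1e-3 while the FP32 tolerance stays at 1e-5. The snippet below is a minimal, self-contained sketch of that yield pattern; the stub PrecisionType enum and the plain tuples stand in for the real paddle.inference API and TrtLayerAutoScanTest harness, so treat it as an illustration of the pattern rather than code from the commit.

    # Minimal sketch of the tolerance pattern used by the updated tests.
    # The enum below is a stand-in for paddle.inference.PrecisionType; the real
    # tests yield (inference_config, expected_trt_node_count, tolerance) instead.
    from enum import Enum


    class PrecisionType(Enum):
        Float32 = 0
        Half = 1


    def sample_predictor_configs():
        # Static shape first, then dynamic shape, as the tests do.
        for use_dynamic_shape in (False, True):
            # FP32 keeps the tight 1e-5 tolerance.
            yield PrecisionType.Float32, use_dynamic_shape, 1e-5
            # FP16 is relaxed from 1e-5 to 1e-3 in this commit.
            yield PrecisionType.Half, use_dynamic_shape, 1e-3


    if __name__ == "__main__":
        for precision, dynamic, tol in sample_predictor_configs():
            shape_mode = "dynamic" if dynamic else "static"
            print(f"{precision.name:<8} {shape_mode:<8} tol={tol}")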
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_arg_max.py

@@ -22,7 +22,6 @@ from typing import List
 class TrtConvertArgMaxTest(TrtLayerAutoScanTest):
     def is_program_valid(self, program_config: ProgramConfig) -> bool:
         input_shape = program_config.inputs["arg_max_input"].shape
         axis = program_config.ops[0].attrs["axis"]

@@ -33,7 +32,6 @@ class TrtConvertArgMaxTest(TrtLayerAutoScanTest):
         return True

     def sample_program_configs(self):
         def generate_input(rank, batch):
             dims = [batch]
             for i in range(rank - 1):

@@ -48,36 +46,37 @@ class TrtConvertArgMaxTest(TrtLayerAutoScanTest):
                 self.rank = rank
                 flatten = False
                 dtype = 2
-                ops_config = [{
-                    "op_type": "arg_max",
-                    "op_inputs": {
-                        "X": ["arg_max_input"]
-                    },
-                    "op_outputs": {
-                        "Out": ["arg_max_out"]
-                    },
-                    "op_attrs": {
-                        "axis": axis,
-                        "keepdims": keepdims,
-                        "flatten": flatten,
-                        "dtype": dtype
-                    }
-                }]
+                ops_config = [
+                    {
+                        "op_type": "arg_max",
+                        "op_inputs": {"X": ["arg_max_input"]},
+                        "op_outputs": {"Out": ["arg_max_out"]},
+                        "op_attrs": {
+                            "axis": axis,
+                            "keepdims": keepdims,
+                            "flatten": flatten,
+                            "dtype": dtype,
+                        },
+                    }
+                ]
                 ops = self.generate_op_config(ops_config)
                 program_config = ProgramConfig(
                     ops=ops,
                     weights={},
                     inputs={
-                        "arg_max_input": TensorConfig(
-                            data_gen=partial(generate_input, rank, batch))
+                        "arg_max_input": TensorConfig(
+                            data_gen=partial(generate_input, rank, batch)
+                        )
                     },
-                    outputs=["arg_max_out"])
+                    outputs=["arg_max_out"],
+                )
                 yield program_config

     def sample_predictor_configs(
-            self, program_config) -> (paddle_infer.Config, List[int], float):
+        self, program_config
+    ) -> (paddle_infer.Config, List[int], float):
         def generate_dynamic_shape(attrs):
             if self.rank == 3:
                 self.dynamic_shape.min_input_shape = {

@@ -117,19 +116,23 @@ class TrtConvertArgMaxTest(TrtLayerAutoScanTest):
         clear_dynamic_shape()
         self.trt_param.precision = paddle_infer.PrecisionType.Float32
         yield self.create_inference_config(), generate_trt_nodes_num(
-            attrs, False), 1e-5
+            attrs, False
+        ), 1e-5
         self.trt_param.precision = paddle_infer.PrecisionType.Half
         yield self.create_inference_config(), generate_trt_nodes_num(
-            attrs, False), 1e-5
+            attrs, False
+        ), 1e-3

         # for dynamic_shape
         generate_dynamic_shape(attrs)
         self.trt_param.precision = paddle_infer.PrecisionType.Float32
         yield self.create_inference_config(), generate_trt_nodes_num(
-            attrs, True), 1e-5
+            attrs, True
+        ), 1e-5
         self.trt_param.precision = paddle_infer.PrecisionType.Half
         yield self.create_inference_config(), generate_trt_nodes_num(
-            attrs, True), 1e-5
+            attrs, True
+        ), 1e-3

     def test(self):
         self.run_test()
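The literal re-wrapping visible in this and the following diffs is consistent with the black formatter's "magic trailing comma" behavior (an assumption about the tooling; the commit does not name the formatter): once a collection ends in a trailing comma, the whole literal is exploded one element per line. A small standalone illustration, not taken from the commit:

    # One-line form: without a trailing comma, black keeps a fitting literal as-is.
    ops_config = [{"op_type": "arg_max", "op_inputs": {"X": ["arg_max_input"]}}]

    # Exploded form: with a trailing comma after the last element, black rewrites
    # the literal with one element per line and trailing commas throughout.
    ops_config = [
        {
            "op_type": "arg_max",
            "op_inputs": {"X": ["arg_max_input"]},
        },
    ]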
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_bmm.py

@@ -12,20 +12,18 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons
+from trt_layer_auto_scan_test import TrtLayerAutoScanTest
 from program_config import TensorConfig, ProgramConfig
 import numpy as np
 import paddle.inference as paddle_infer
 from functools import partial
-from typing import Optional, List, Callable, Dict, Any, Set
+from typing import List
 import unittest
 import os

 class TrtConvertBmmTest_dynamic(TrtLayerAutoScanTest):
     def sample_program_configs(self):
         def generate_input(shape):
             return np.random.random(shape).astype(np.float32)

@@ -33,48 +31,47 @@ class TrtConvertBmmTest_dynamic(TrtLayerAutoScanTest):
         input1_shape = [batch, 350, 75]
         input2_shape = [batch, 75, 25]
         dics = [{}]
-        ops_config = [{
-            "op_type": "bmm",
-            "op_inputs": {
-                "X": ["input1_data"],
-                "Y": ["input2_data"]
-            },
-            "op_outputs": {
-                "Out": ["output_data"]
-            },
-            "op_attrs": dics[0]
-        }]
+        ops_config = [
+            {
+                "op_type": "bmm",
+                "op_inputs": {"X": ["input1_data"], "Y": ["input2_data"]},
+                "op_outputs": {"Out": ["output_data"]},
+                "op_attrs": dics[0],
+            }
+        ]
         ops = self.generate_op_config(ops_config)
         program_config = ProgramConfig(
             ops=ops,
             weights={},
             inputs={
-                "input1_data": TensorConfig(
-                    data_gen=partial(generate_input, input1_shape)),
-                "input2_data": TensorConfig(
-                    data_gen=partial(generate_input, input2_shape))
+                "input1_data": TensorConfig(
+                    data_gen=partial(generate_input, input1_shape)
+                ),
+                "input2_data": TensorConfig(
+                    data_gen=partial(generate_input, input2_shape)
+                ),
             },
-            outputs=["output_data"])
+            outputs=["output_data"],
+        )
         yield program_config

     def sample_predictor_configs(
-            self, program_config) -> (paddle_infer.Config, List[int], float):
+        self, program_config
+    ) -> (paddle_infer.Config, List[int], float):
         def generate_dynamic_shape(attrs):
             self.dynamic_shape.min_input_shape = {
                 "input1_data": [10, 350, 75],
-                "input2_data": [10, 75, 25]
+                "input2_data": [10, 75, 25],
             }
             self.dynamic_shape.max_input_shape = {
                 "input1_data": [100, 350, 75],
-                "input2_data": [100, 75, 25]
+                "input2_data": [100, 75, 25],
             }
             self.dynamic_shape.opt_input_shape = {
                 "input1_data": [15, 350, 75],
-                "input2_data": [15, 75, 25]
+                "input2_data": [15, 75, 25],
             }

         def clear_dynamic_shape():

@@ -95,25 +92,29 @@ class TrtConvertBmmTest_dynamic(TrtLayerAutoScanTest):
         clear_dynamic_shape()
         self.trt_param.precision = paddle_infer.PrecisionType.Float32
         yield self.create_inference_config(), generate_trt_nodes_num(
-            attrs, False), 1e-5
+            attrs, False
+        ), 1e-5
         self.trt_param.precision = paddle_infer.PrecisionType.Half
         yield self.create_inference_config(), generate_trt_nodes_num(
-            attrs, False), 1e-5
+            attrs, False
+        ), 1e-3
         # The output has little diff between gpu and trt in CI-Windows-Inference
         tol_fp32 = 1e-4
         tol_half = 1e-4
-        if (os.name == 'nt'):
+        if os.name == 'nt':
             tol_fp32 = 1e-2
             tol_half = 1e-2
         # for dynamic_shape
         generate_dynamic_shape(attrs)
         self.trt_param.precision = paddle_infer.PrecisionType.Float32
         yield self.create_inference_config(), generate_trt_nodes_num(
-            attrs, True), tol_fp32
+            attrs, True
+        ), tol_fp32
         self.trt_param.precision = paddle_infer.PrecisionType.Half
         yield self.create_inference_config(), generate_trt_nodes_num(
-            attrs, True), tol_half
+            attrs, True
+        ), tol_half

     def add_skip_trt_case(self):
         pass
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_clip.py

@@ -22,12 +22,10 @@ import unittest
 class TrtConvertClipTest(TrtLayerAutoScanTest):
     def is_program_valid(self, program_config: ProgramConfig) -> bool:
         return True

     def sample_program_configs(self):
         def generate_input1(dims, batch, attrs: List[Dict[str, Any]]):
             if dims == 1:
                 return np.ones([32]).astype(np.float32)

@@ -46,52 +44,52 @@ class TrtConvertClipTest(TrtLayerAutoScanTest):
         for dims in [1, 2, 3, 4]:
             for batch in [1, 4]:
-                for op_inputs in [{
-                        "X": ["input_data"]
-                }, {
-                        "X": ["input_data"],
-                        "Min": ["Min_"],
-                        "Max": ["Max_"]
-                }]:
+                for op_inputs in [
+                    {"X": ["input_data"]},
+                    {"X": ["input_data"], "Min": ["Min_"], "Max": ["Max_"]},
+                ]:
                     self.input_num = len(op_inputs)
                     self.dims = dims
-                    dics = [{
-                        "min": np.random.uniform(1, 10),
-                        "max": np.random.uniform(10, 20)
-                    }, {
-                        "op_inputs": op_inputs
-                    }]
-                    ops_config = [{
-                        "op_type": "clip",
-                        "op_inputs": op_inputs,
-                        "op_outputs": {
-                            "Out": ["output_data"]
-                        },
-                        "op_attrs": dics[0]
-                    }]
+                    dics = [
+                        {
+                            "min": np.random.uniform(1, 10),
+                            "max": np.random.uniform(10, 20),
+                        },
+                        {"op_inputs": op_inputs},
+                    ]
+                    ops_config = [
+                        {
+                            "op_type": "clip",
+                            "op_inputs": op_inputs,
+                            "op_outputs": {"Out": ["output_data"]},
+                            "op_attrs": dics[0],
+                        }
+                    ]
                     ops = self.generate_op_config(ops_config)
                     program_config = ProgramConfig(
                         ops=ops,
                         weights={
-                            "Min_": TensorConfig(
-                                data_gen=partial(generate_weight1, dics)),
-                            "Max_": TensorConfig(
-                                data_gen=partial(generate_weight2, dics))
+                            "Min_": TensorConfig(
+                                data_gen=partial(generate_weight1, dics)
+                            ),
+                            "Max_": TensorConfig(
+                                data_gen=partial(generate_weight2, dics)
+                            ),
                         },
                         inputs={
-                            "input_data": TensorConfig(
-                                data_gen=partial(generate_input1, dims, batch, dics))
+                            "input_data": TensorConfig(
+                                data_gen=partial(generate_input1, dims, batch, dics)
+                            )
                         },
-                        outputs=["output_data"])
+                        outputs=["output_data"],
+                    )
                     yield program_config

     def sample_predictor_configs(self, program_config):
         def generate_dynamic_shape(attrs):
             if self.dims == 1:
                 self.dynamic_shape.min_input_shape = {"input_data": [1]}

@@ -135,19 +133,23 @@ class TrtConvertClipTest(TrtLayerAutoScanTest):
         ...
         self.trt_param.precision = paddle_infer.PrecisionType.Half
         yield self.create_inference_config(), generate_trt_nodes_num(
-            attrs, False), 1e-5
+            attrs, False
+        ), 1e-3
         ...
         self.trt_param.precision = paddle_infer.PrecisionType.Half
         yield self.create_inference_config(), generate_trt_nodes_num(
-            attrs, True), 1e-5
+            attrs, True
+        ), 1e-3

     def test(self):
         self.run_test()
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_concat.py

@@ -22,7 +22,6 @@ from typing import Optional, List, Callable, Dict, Any, Set
 class TrtConvertConcatTest(TrtLayerAutoScanTest):
     def is_program_valid(self, program_config: ProgramConfig) -> bool:
         inputs = program_config.inputs
         weights = program_config.weights

@@ -31,14 +30,13 @@ class TrtConvertConcatTest(TrtLayerAutoScanTest):
         attrs = [
             program_config.ops[i].attrs for i in range(len(program_config.ops))
         ]
-        #The input dimension should be less than or equal to the set axis.
+        # The input dimension should be less than or equal to the set axis.
         if len(inputs['concat_input1'].shape) <= attrs[0]['axis']:
             return False
         return True

     def sample_program_configs(self):
         def generate_input1(attrs: List[Dict[str, Any]], batch):
             if self.dims == 4:
                 return np.ones([batch, 3, 24, 24]).astype(np.float32)

@@ -79,58 +77,83 @@ class TrtConvertConcatTest(TrtLayerAutoScanTest):
                     self.num_input = num_input
                     self.dims = dims
                     dics = [{"axis": axis}, {}]
-                    dics_intput = [{
-                        "X": ["concat_input1", "concat_input2", "concat_input3"],
-                        "AxisTensor": ["AxisTensor"],
-                    }, {
-                        "X": ["concat_input1", "concat_input2", "concat_input3"]
-                    }]
-                    dics_inputs = [{
-                        "concat_input1": TensorConfig(
-                            data_gen=partial(generate_input1, dics, batch)),
-                        "concat_input2": TensorConfig(
-                            data_gen=partial(generate_input2, dics, batch)),
-                        "concat_input3": TensorConfig(
-                            data_gen=partial(generate_input3, dics, batch)),
-                        "AxisTensor": TensorConfig(
-                            data_gen=partial(generate_weight1, dics))
-                    }, {
-                        "concat_input1": TensorConfig(
-                            data_gen=partial(generate_input1, dics, batch)),
-                        "concat_input2": TensorConfig(
-                            data_gen=partial(generate_input2, dics, batch)),
-                        "concat_input3": TensorConfig(
-                            data_gen=partial(generate_input3, dics, batch))
-                    }]
-                    ops_config = [{
-                        "op_type": "concat",
-                        "op_inputs": dics_intput[num_input],
-                        "op_outputs": {
-                            "Out": ["concat_output"]
-                        },
-                        "op_attrs": dics[0]
-                    }]
+                    dics_intput = [
+                        {
+                            "X": [
+                                "concat_input1",
+                                "concat_input2",
+                                "concat_input3",
+                            ],
+                            "AxisTensor": ["AxisTensor"],
+                        },
+                        {
+                            "X": [
+                                "concat_input1",
+                                "concat_input2",
+                                "concat_input3",
+                            ]
+                        },
+                    ]
+                    dics_inputs = [
+                        {
+                            "concat_input1": TensorConfig(
+                                data_gen=partial(generate_input1, dics, batch)
+                            ),
+                            "concat_input2": TensorConfig(
+                                data_gen=partial(generate_input2, dics, batch)
+                            ),
+                            "concat_input3": TensorConfig(
+                                data_gen=partial(generate_input3, dics, batch)
+                            ),
+                            "AxisTensor": TensorConfig(
+                                data_gen=partial(generate_weight1, dics)
+                            ),
+                        },
+                        {
+                            "concat_input1": TensorConfig(
+                                data_gen=partial(generate_input1, dics, batch)
+                            ),
+                            "concat_input2": TensorConfig(
+                                data_gen=partial(generate_input2, dics, batch)
+                            ),
+                            "concat_input3": TensorConfig(
+                                data_gen=partial(generate_input3, dics, batch)
+                            ),
+                        },
+                    ]
+                    ops_config = [
+                        {
+                            "op_type": "concat",
+                            "op_inputs": dics_intput[num_input],
+                            "op_outputs": {"Out": ["concat_output"]},
+                            "op_attrs": dics[0],
+                        }
+                    ]
                     ops = self.generate_op_config(ops_config)
                     program_config = ProgramConfig(
                         ops=ops,
                         weights={},
                         inputs=dics_inputs[num_input],
-                        outputs=["concat_output"])
+                        outputs=["concat_output"],
+                    )
                     yield program_config

     def sample_predictor_configs(
-            self, program_config) -> (paddle_infer.Config, List[int], float):
+        self, program_config
+    ) -> (paddle_infer.Config, List[int], float):
         def generate_dynamic_shape(attrs):
             if self.num_input == 0:
                 if self.dims == 4:

@@ -138,76 +161,76 @@ class TrtConvertConcatTest(TrtLayerAutoScanTest):
                     self.dynamic_shape.min_input_shape = {
                         "concat_input1": [1, 3, 24, 24],
                         "concat_input2": [1, 3, 24, 24],
                         "concat_input3": [1, 3, 24, 24],
-                        "AxisTensor": [1]
+                        "AxisTensor": [1],
                     }
                     ...
                 … (the same trailing comma is added after the "AxisTensor" entry of
                    every min/max/opt_input_shape dict in the dims == 4, 3, 2 and 1
                    branches; the shape values themselves are unchanged)

@@ -219,60 +242,60 @@ class TrtConvertConcatTest(TrtLayerAutoScanTest):
                     self.dynamic_shape.max_input_shape = {
                         "concat_input1": [4, 3, 48, 48],
                         "concat_input2": [4, 3, 48, 48],
-                        "concat_input3": [4, 3, 48, 48]
+                        "concat_input3": [4, 3, 48, 48],
                     }
                     ...
                 … (the same trailing comma is added after the final "concat_input3"
                    entry of every remaining min/max/opt_input_shape dict in the
                    num_input == 1 branches for dims 4, 3, 2 and 1)

         def clear_dynamic_shape():

@@ -296,29 +319,33 @@ class TrtConvertConcatTest(TrtLayerAutoScanTest):
         ...
         self.trt_param.precision = paddle_infer.PrecisionType.Half
         yield self.create_inference_config(), generate_trt_nodes_num(
-            attrs, False), 1e-5
+            attrs, False
+        ), 1e-3
         ...
         self.trt_param.precision = paddle_infer.PrecisionType.Half
         yield self.create_inference_config(), generate_trt_nodes_num(
-            attrs, True), 1e-5
+            attrs, True
+        ), 1e-3

     def add_skip_trt_case(self):
         def teller1(program_config, predictor_config):
             if len(program_config.inputs) == 4:
                 return True
             return False

-        self.add_skip_case(teller1, SkipReasons.TRT_NOT_SUPPORT,
-                           "INPUT AxisTensor NOT SUPPORT")
+        self.add_skip_case(
+            teller1, SkipReasons.TRT_NOT_SUPPORT, "INPUT AxisTensor NOT SUPPORT"
+        )

     def test(self):
         self.add_skip_trt_case()
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d_transpose.py

@@ -22,7 +22,6 @@ from typing import Optional, List, Callable, Dict, Any, Set
 class TrtConvertConv2dTransposeTest(TrtLayerAutoScanTest):
     def is_program_valid(self, program_config: ProgramConfig) -> bool:
         inputs = program_config.inputs
         weights = program_config.weights

@@ -30,8 +29,10 @@ class TrtConvertConv2dTransposeTest(TrtLayerAutoScanTest):
             program_config.ops[i].attrs for i in range(len(program_config.ops))
         ]
-        if inputs['input_data'].shape[1] != weights['conv2d_weight'].shape[1] * attrs[0]['groups']:
+        if (
+            inputs['input_data'].shape[1]
+            != weights['conv2d_weight'].shape[1] * attrs[0]['groups']
+        ):
             return False
         if inputs['input_data'].shape[1] != weights['conv2d_weight'].shape[0]:

@@ -54,12 +55,13 @@ class TrtConvertConv2dTransposeTest(TrtLayerAutoScanTest):
         def generate_weight1(num_channels, attrs: List[Dict[str, Any]]):
             if attrs[0]['groups'] == 1:
-                return np.random.random([num_channels, num_channels, 3, 3]).astype(np.float32)
+                return np.random.random(
+                    [num_channels, num_channels, 3, 3]
+                ).astype(np.float32)
             else:
                 return np.random.random(
-                    [num_channels, int(num_channels / 2), 3, 3]).astype(np.float32)
+                    [num_channels, int(num_channels / 2), 3, 3]
+                ).astype(np.float32)

         for num_channels in [2, 4, 6]:
             for batch in [1, 4]:

@@ -67,99 +69,113 @@ class TrtConvertConv2dTransposeTest(TrtLayerAutoScanTest):
             for paddings in [[0, 3], [1, 2, 3, 4]]:
                 for groups in [2]:
-                    for padding_algorithm in ['EXPLICIT', 'SAME', 'VALID']:
+                    for padding_algorithm in [
+                        'EXPLICIT',
+                        'SAME',
+                        'VALID',
+                    ]:
                         for dilations in [[2, 2], [1, 2]]:
                             for data_format in ['NCHW']:
                                 self.num_channels = num_channels
-                                dics = [{
-                                    "data_fromat": data_format,
-                                    "dilations": dilations,
-                                    "padding_algorithm": padding_algorithm,
-                                    "groups": groups,
-                                    "paddings": paddings,
-                                    "strides": strides,
-                                    "data_format": data_format,
-                                    "output_size": [],
-                                    "output_padding": []
-                                }]
-                                ops_config = [{
-                                    "op_type": "conv2d_transpose",
-                                    "op_inputs": {
-                                        "Input": ["input_data"],
-                                        "Filter": ["conv2d_weight"]
-                                    },
-                                    "op_outputs": {
-                                        "Output": ["output_data"]
-                                    },
-                                    "op_attrs": dics[0]
-                                }]
+                                dics = [
+                                    {
+                                        "data_fromat": data_format,
+                                        "dilations": dilations,
+                                        "padding_algorithm": padding_algorithm,
+                                        "groups": groups,
+                                        "paddings": paddings,
+                                        "strides": strides,
+                                        "data_format": data_format,
+                                        "output_size": [],
+                                        "output_padding": [],
+                                    }
+                                ]
+                                ops_config = [
+                                    {
+                                        "op_type": "conv2d_transpose",
+                                        "op_inputs": {
+                                            "Input": ["input_data"],
+                                            "Filter": ["conv2d_weight"],
+                                        },
+                                        "op_outputs": {
+                                            "Output": ["output_data"]
+                                        },
+                                        "op_attrs": dics[0],
+                                    }
+                                ]
                                 ops = self.generate_op_config(
-                                    ops_config)
+                                    ops_config
+                                )
                                 program_config = ProgramConfig(
                                     ops=ops,
                                     weights={
-                                        "conv2d_weight": TensorConfig(
-                                            data_gen=partial(generate_weight1, num_channels, dics))
+                                        "conv2d_weight": TensorConfig(
+                                            data_gen=partial(
+                                                generate_weight1,
+                                                num_channels,
+                                                dics,
+                                            )
+                                        )
                                     },
                                     inputs={
-                                        "input_data": TensorConfig(
-                                            data_gen=partial(generate_input1, batch, num_channels, dics))
+                                        "input_data": TensorConfig(
+                                            data_gen=partial(
+                                                generate_input1,
+                                                batch,
+                                                num_channels,
+                                                dics,
+                                            )
+                                        )
                                     },
-                                    outputs=["output_data"])
+                                    outputs=["output_data"],
+                                )
                                 yield program_config

     def sample_predictor_configs(
-            self, program_config) -> (paddle_infer.Config, List[int], float):
+        self, program_config
+    ) -> (paddle_infer.Config, List[int], float):
         def generate_dynamic_shape(attrs):
             if self.num_channels == 2:
                 self.dynamic_shape.min_input_shape = {
                     "input_data": [1, 2, 32, 32],
-                    "output_data": [1, 24, 32, 32]
+                    "output_data": [1, 24, 32, 32],
                 }
                 ...
             … (the same trailing comma is added after the "output_data" entry of
                every min/max/opt_input_shape dict in the num_channels == 2, == 4
                and else branches; the shape values themselves are unchanged)

         def clear_dynamic_shape():

@@ -178,10 +194,12 @@ class TrtConvertConv2dTransposeTest(TrtLayerAutoScanTest):
         clear_dynamic_shape()
         self.trt_param.precision = paddle_infer.PrecisionType.Float32
         yield self.create_inference_config(), generate_trt_nodes_num(
-            attrs, False), 1e-5
+            attrs, False
+        ), 1e-5
         self.trt_param.precision = paddle_infer.PrecisionType.Half
         yield self.create_inference_config(), generate_trt_nodes_num(
-            attrs, False), (1e-5, 1e-3)
+            attrs, False
+        ), (1e-3, 1e-3)
         # self.trt_param.precision = paddle_infer.PrecisionType.Int8
         # yield self.create_inference_config(), generate_trt_nodes_num(
         # attrs, False), (1e-5, 1e-5)

@@ -190,24 +208,26 @@ class TrtConvertConv2dTransposeTest(TrtLayerAutoScanTest):
         generate_dynamic_shape(attrs)
         self.trt_param.precision = paddle_infer.PrecisionType.Float32
         yield self.create_inference_config(), generate_trt_nodes_num(
-            attrs, True), 1e-5
+            attrs, True
+        ), 1e-5
         self.trt_param.precision = paddle_infer.PrecisionType.Half
         yield self.create_inference_config(), generate_trt_nodes_num(
-            attrs, True), (1e-5, 1e-3)
+            attrs, True
+        ), (1e-3, 1e-3)
         # self.trt_param.precision = paddle_infer.PrecisionType.Int8
         # yield self.create_inference_config(), generate_trt_nodes_num(
         # attrs, True), (1e-5, 1e-5)

     def add_skip_trt_case(self):
         def teller1(program_config, predictor_config):
             if self.trt_param.precision == paddle_infer.PrecisionType.Int8:
                 return True
             return False

         self.add_skip_case(
-            teller1, SkipReasons.TRT_NOT_IMPLEMENTED,
-            "When precisionType is int8 without relu op, output is different between Trt and Paddle."
+            teller1,
+            SkipReasons.TRT_NOT_IMPLEMENTED,
+            "When precisionType is int8 without relu op, output is different between Trt and Paddle.",
         )

     def test(self):

@@ -221,7 +241,6 @@ class TrtConvertConv2dTransposeTest(TrtLayerAutoScanTest):
 # Special case
 class TrtConvertConv2dTransposeTest2(TrtLayerAutoScanTest):
     def is_program_valid(self, program_config: ProgramConfig) -> bool:
         ver = paddle_infer.get_trt_compile_version()
         if ver[0] * 1000 + ver[1] * 100 + ver[2] * 10 < 7000:

@@ -241,49 +260,52 @@ class TrtConvertConv2dTransposeTest2(TrtLayerAutoScanTest):
         batch = 1
         self.num_channels = num_channels
-        dics = [{
-            "data_fromat": 'NCHW',
-            "dilations": [1, 1],
-            "padding_algorithm": 'EXPLICIT',
-            "groups": 1,
-            "paddings": [1, 1],
-            "strides": [2, 2],
-            "output_padding": [1, 1],
-            "output_size": [],
-        }]
-        ops_config = [{
-            "op_type": "conv2d_transpose",
-            "op_inputs": {
-                "Input": ["input_data"],
-                "Filter": ["conv2d_weight"]
-            },
-            "op_outputs": {
-                "Output": ["output_data"]
-            },
-            "op_attrs": dics[0]
-        }]
+        dics = [
+            {
+                "data_fromat": 'NCHW',
+                "dilations": [1, 1],
+                "padding_algorithm": 'EXPLICIT',
+                "groups": 1,
+                "paddings": [1, 1],
+                "strides": [2, 2],
+                "output_padding": [1, 1],
+                "output_size": [],
+            }
+        ]
+        ops_config = [
+            {
+                "op_type": "conv2d_transpose",
+                "op_inputs": {
+                    "Input": ["input_data"],
+                    "Filter": ["conv2d_weight"],
+                },
+                "op_outputs": {"Output": ["output_data"]},
+                "op_attrs": dics[0],
+            }
+        ]
         ops = self.generate_op_config(ops_config)
         program_config = ProgramConfig(
             ops=ops,
             weights={
-                "conv2d_weight": TensorConfig(
-                    data_gen=partial(generate_weight1, num_channels, dics))
+                "conv2d_weight": TensorConfig(
+                    data_gen=partial(generate_weight1, num_channels, dics)
+                )
             },
             inputs={
-                "input_data": TensorConfig(
-                    data_gen=partial(generate_input1, batch, num_channels, dics))
+                "input_data": TensorConfig(
+                    data_gen=partial(generate_input1, batch, num_channels, dics)
+                )
             },
-            outputs=["output_data"])
+            outputs=["output_data"],
+        )
         yield program_config

     def sample_predictor_configs(
-            self, program_config) -> (paddle_infer.Config, List[int], float):
+        self, program_config
+    ) -> (paddle_infer.Config, List[int], float):
         def generate_dynamic_shape(attrs):
             self.dynamic_shape.min_input_shape = {
                 "input_data": [1, 128, 20, 30],

@@ -311,19 +333,23 @@ class TrtConvertConv2dTransposeTest2(TrtLayerAutoScanTest):
         clear_dynamic_shape()
         self.trt_param.precision = paddle_infer.PrecisionType.Float32
         yield self.create_inference_config(), generate_trt_nodes_num(
-            attrs, False), 1e-4
+            attrs, False
+        ), 1e-4
         self.trt_param.precision = paddle_infer.PrecisionType.Half
         yield self.create_inference_config(), generate_trt_nodes_num(
-            attrs, False), (1e0, 1e-3)
+            attrs, False
+        ), (1e0, 1e-3)
         # for dynamic_shape
         generate_dynamic_shape(attrs)
         self.trt_param.precision = paddle_infer.PrecisionType.Float32
         yield self.create_inference_config(), generate_trt_nodes_num(
-            attrs, True), 1e-4
+            attrs, True
+        ), 1e-4
         self.trt_param.precision = paddle_infer.PrecisionType.Half
         yield self.create_inference_config(), generate_trt_nodes_num(
-            attrs, True), (1e0, 1e-3)
+            attrs, True
+        ), (1e0, 1e-3)

     def add_skip_trt_case(self):
         pass
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_dropout.py

@@ -22,12 +22,10 @@ from typing import Optional, List, Callable, Dict, Any, Set
 class TrtConvertDropoutTest(TrtLayerAutoScanTest):
     def is_program_valid(self, program_config: ProgramConfig) -> bool:
         return True

     def sample_program_configs(self):
         def generate_input1(dims, batch, attrs: List[Dict[str, Any]]):
             if dims == 1:
                 return np.ones([64]).astype(np.float32)

@@ -42,47 +40,57 @@ class TrtConvertDropoutTest(TrtLayerAutoScanTest):
         for batch in [1, 2, 4]:
             for fix_seed in [False, True]:
-                for dropout_implementation in [
-                        "downgrade_in_infer", "upscale_in_train"
-                ]:
+                for dropout_implementation in [
+                    "downgrade_in_infer",
+                    "upscale_in_train",
+                ]:
                     for dropout_prob in [np.random.random()]:
                         for seed in [0, 64, 128, 512]:
                             self.dims = dims
-                            dics = [{
-                                "fix_seed": fix_seed,
-                                "dropout_implementation": dropout_implementation,
-                                "dropout_prob": dropout_prob,
-                                "seed": seed,
-                                "is_test": True
-                            }]
-                            ops_config = [{
-                                "op_type": "dropout",
-                                "op_inputs": {
-                                    "X": ["input_data"],
-                                },
-                                "op_outputs": {
-                                    "Out": ["dropout_output_data"]
-                                },
-                                "op_attrs": dics[0]
-                            }]
+                            dics = [
+                                {
+                                    "fix_seed": fix_seed,
+                                    "dropout_implementation": dropout_implementation,
+                                    "dropout_prob": dropout_prob,
+                                    "seed": seed,
+                                    "is_test": True,
+                                }
+                            ]
+                            ops_config = [
+                                {
+                                    "op_type": "dropout",
+                                    "op_inputs": {
+                                        "X": ["input_data"],
+                                    },
+                                    "op_outputs": {
+                                        "Out": ["dropout_output_data"]
+                                    },
+                                    "op_attrs": dics[0],
+                                }
+                            ]
                             ops = self.generate_op_config(ops_config)
                             program_config = ProgramConfig(
                                 ops=ops,
                                 weights={},
                                 inputs={
-                                    "input_data": TensorConfig(
-                                        data_gen=partial(generate_input1, dims, batch, dics))
+                                    "input_data": TensorConfig(
+                                        data_gen=partial(
+                                            generate_input1,
+                                            dims,
+                                            batch,
+                                            dics,
+                                        )
+                                    )
                                 },
-                                outputs=["dropout_output_data"])
+                                outputs=["dropout_output_data"],
+                            )
                             yield program_config

     def sample_predictor_configs(
-            self, program_config) -> (paddle_infer.Config, List[int], float):
+        self, program_config
+    ) -> (paddle_infer.Config, List[int], float):
         def generate_dynamic_shape(attrs):
             if self.dims == 1:
                 self.dynamic_shape.min_input_shape = {"input_data": [1]}

@@ -128,19 +136,23 @@ class TrtConvertDropoutTest(TrtLayerAutoScanTest):
         ...
         self.trt_param.precision = paddle_infer.PrecisionType.Half
         yield self.create_inference_config(), generate_trt_nodes_num(
-            attrs, False), 1e-5
+            attrs, False
+        ), 1e-3
         ...
         self.trt_param.precision = paddle_infer.PrecisionType.Half
         yield self.create_inference_config(), generate_trt_nodes_num(
-            attrs, True), 1e-5
+            attrs, True
+        ), 1e-3

     def add_skip_trt_case(self):
         pass
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_elementwise.py

@@ -24,12 +24,10 @@ from typing import Optional, List, Callable, Dict, Any, Set
 # This is the special test case with weight including batch dimension
 # I don't want to mess up the code written by others, so I wrote a class specifically
 class TrtConvertElementwiseTest_one_input_special_case0(TrtLayerAutoScanTest):
     def is_program_valid(self, program_config: ProgramConfig) -> bool:
         return True

     def sample_program_configs(self):
         def generate_input(shape):
             return np.random.random(shape).astype(np.float32)

@@ -39,44 +37,50 @@ class TrtConvertElementwiseTest_one_input_special_case0(TrtLayerAutoScanTest):
         for batch in [1, 4]:
             for shape in [[batch, 32, 16, 32]]:
                 for op_type in [
-                        "elementwise_add", "elementwise_mul", "elementwise_sub",
-                        "elementwise_div", "elementwise_pow", "elementwise_min",
-                        "elementwise_max"
+                    "elementwise_add",
+                    "elementwise_mul",
+                    "elementwise_sub",
+                    "elementwise_div",
+                    "elementwise_pow",
+                    "elementwise_min",
+                    "elementwise_max",
                 ]:
                     for axis in [-1]:
                         self.dims = len(shape)
                         dics = [{"axis": axis}]
-                        ops_config = [{
-                            "op_type": op_type,
-                            "op_inputs": {
-                                "X": ["input_data"],
-                                "Y": ["weight"]
-                            },
-                            "op_outputs": {
-                                "Out": ["output_data"]
-                            },
-                            "op_attrs": dics[0]
-                        }]
+                        ops_config = [
+                            {
+                                "op_type": op_type,
+                                "op_inputs": {
+                                    "X": ["input_data"],
+                                    "Y": ["weight"],
+                                },
+                                "op_outputs": {"Out": ["output_data"]},
+                                "op_attrs": dics[0],
+                            }
+                        ]
                         ops = self.generate_op_config(ops_config)
                         program_config = ProgramConfig(
                             ops=ops,
                             weights={
-                                "weight": TensorConfig(
-                                    data_gen=partial(generate_weight))
+                                "weight": TensorConfig(
+                                    data_gen=partial(generate_weight)
+                                )
                             },
                             inputs={
-                                "input_data": TensorConfig(
-                                    data_gen=partial(generate_input, shape)),
+                                "input_data": TensorConfig(
+                                    data_gen=partial(generate_input, shape)
+                                ),
                             },
-                            outputs=["output_data"])
+                            outputs=["output_data"],
+                        )
                         yield program_config

     def sample_predictor_configs(
-            self, program_config) -> (paddle_infer.Config, List[int], float):
+        self, program_config
+    ) -> (paddle_infer.Config, List[int], float):
         def generate_dynamic_shape(attrs):
             # The input.dims[1] must be equal to the weight's length.
             if self.dims == 4:

@@ -106,19 +110,23 @@ class TrtConvertElementwiseTest_one_input_special_case0(TrtLayerAutoScanTest):
         clear_dynamic_shape()
         self.trt_param.precision = paddle_infer.PrecisionType.Float32
         yield self.create_inference_config(), generate_trt_nodes_num(
-            attrs, False), (1e-5, 1e-5)
+            attrs, False
+        ), (1e-5, 1e-5)
         self.trt_param.precision = paddle_infer.PrecisionType.Half
         yield self.create_inference_config(), generate_trt_nodes_num(
-            attrs, False), (1e-3, 1e-3)
+            attrs, False
+        ), (1e-3, 1e-3)
         # for dynamic_shape
         generate_dynamic_shape(attrs)
         self.trt_param.precision = paddle_infer.PrecisionType.Float32
         yield self.create_inference_config(), generate_trt_nodes_num(
-            attrs, True), (1e-5, 1e-5)
+            attrs, True
+        ), (1e-5, 1e-5)
         self.trt_param.precision = paddle_infer.PrecisionType.Half
         yield self.create_inference_config(), generate_trt_nodes_num(
-            attrs, True), (1e-3, 1e-3)
+            attrs, True
+        ), (1e-3, 1e-3)

     def add_skip_trt_case(self):
         pass

@@ -130,12 +138,10 @@ class TrtConvertElementwiseTest_one_input_special_case0(TrtLayerAutoScanTest):
 # This is the special test case
 class TrtConvertElementwiseTest_one_input_special_case1(TrtLayerAutoScanTest):
     def is_program_valid(self, program_config: ProgramConfig) -> bool:
         return True

     def sample_program_configs(self):
         def generate_input(shape):
             return np.random.random(shape).astype(np.float32)

@@ -144,44 +150,47 @@ class TrtConvertElementwiseTest_one_input_special_case1(TrtLayerAutoScanTest):
         for shape in [[32]]:
             for op_type in [
                 ...
             … (same re-wrap of the op_type list, ops_config with
                "X": ["input_data"] / "Y": ["weight"], the weight and input_data
                TensorConfig calls and outputs=["output_data"], as in the class above)
                         yield program_config

     def sample_predictor_configs(
-            self, program_config) -> (paddle_infer.Config, List[int], float):
+        self, program_config
+    ) -> (paddle_infer.Config, List[int], float):
         def generate_dynamic_shape(attrs):
             self.dynamic_shape.min_input_shape = {"input_data": [32]}
             self.dynamic_shape.max_input_shape = {"input_data": [64]}

@@ -205,19 +214,23 @@ class TrtConvertElementwiseTest_one_input_special_case1(TrtLayerAutoScanTest):
             … (same re-wrap of the four yield statements as above; the tolerances
                stay (1e-5, 1e-5) for FP32 and (1e-3, 1e-3) for FP16)

     def add_skip_trt_case(self):
         pass

@@ -228,12 +241,10 @@ class TrtConvertElementwiseTest_one_input_special_case1(TrtLayerAutoScanTest):
 class TrtConvertElementwiseTest_one_input(TrtLayerAutoScanTest):
     def is_program_valid(self, program_config: ProgramConfig) -> bool:
         return True

     def sample_program_configs(self):
         def generate_input(shape):
             return np.random.random(shape).astype(np.float32)

@@ -241,47 +252,57 @@ class TrtConvertElementwiseTest_one_input(TrtLayerAutoScanTest):
             return np.random.randn(32).astype(np.float32)

         for batch in [1, 4]:
-            for shape in [[32], [batch, 32], [batch, 32, 32], [batch, 32, 16, 32]]:
+            for shape in [
+                [32],
+                [batch, 32],
+                [batch, 32, 32],
+                [batch, 32, 16, 32],
+            ]:
                 for op_type in [
                     ...
                 for axis in [-1 if len(shape) == 1 else 1]:
                     self.dims = len(shape)
                     dics = [{"axis": axis}]
                     … (same re-wrap of the op_type list, ops_config with
                        "X": ["input_data"] / "Y": ["weight"], the TensorConfig calls
                        and outputs=["output_data"], as in the first class)
                         yield program_config

     def sample_predictor_configs(
-            self, program_config) -> (paddle_infer.Config, List[int], float):
+        self, program_config
+    ) -> (paddle_infer.Config, List[int], float):
         def generate_dynamic_shape(attrs):
             # The input.dims[1] must be equal to the weight's length.
             if self.dims == 1:

@@ -325,19 +346,23 @@ class TrtConvertElementwiseTest_one_input(TrtLayerAutoScanTest):
             … (same re-wrap of the four yield statements; tolerances stay
                (1e-5, 1e-5) for FP32 and (1e-3, 1e-3) for FP16)

     def add_skip_trt_case(self):
         pass

@@ -348,108 +373,112 @@ class TrtConvertElementwiseTest_one_input(TrtLayerAutoScanTest):
 class TrtConvertElementwiseTest_two_input_without_broadcast(
-        TrtLayerAutoScanTest):
+    TrtLayerAutoScanTest
+):
     def is_program_valid(self, program_config: ProgramConfig) -> bool:
         return True

     def sample_program_configs(self):
         def generate_input(shape):
             return np.random.random(shape).astype(np.float32)

-        for shape in [[4], [4, 32], [2, 64, 32], [1, 8, 16, 32]]:
+        for shape in [[4], [4, 32], [2, 32, 16], [1, 8, 16, 32]]:
             for op_type in [
                 ...
                 for axis in [0, -1]:
                     self.dims = len(shape)
                     dics = [{"axis": axis}]
-                    ops_config = [{
-                        "op_type": op_type,
-                        "op_inputs": {
-                            "X": ["input_data1"],
-                            "Y": ["input_data2"]
-                        },
-                        "op_outputs": {
-                            "Out": ["output_data"]
-                        },
-                        "op_attrs": dics[0]
-                    }]
+                    ops_config = [
+                        {
+                            "op_type": op_type,
+                            "op_inputs": {
+                                "X": ["input_data1"],
+                                "Y": ["input_data2"],
+                            },
+                            "op_outputs": {"Out": ["output_data"]},
+                            "op_attrs": dics[0],
+                        }
+                    ]
                     ops = self.generate_op_config(ops_config)
                     program_config = ProgramConfig(
                         ops=ops,
                         weights={},
                         inputs={
-                            "input_data1": TensorConfig(
-                                data_gen=partial(generate_input, shape)),
-                            "input_data2": TensorConfig(
-                                data_gen=partial(generate_input, shape))
+                            "input_data1": TensorConfig(
+                                data_gen=partial(generate_input, shape)
+                            ),
+                            "input_data2": TensorConfig(
+                                data_gen=partial(generate_input, shape)
+                            ),
                         },
-                        outputs=["output_data"])
+                        outputs=["output_data"],
+                    )
                     yield program_config

     def sample_predictor_configs(
-            self, program_config) -> (paddle_infer.Config, List[int], float):
+        self, program_config
+    ) -> (paddle_infer.Config, List[int], float):
         def generate_dynamic_shape(attrs):
             if self.dims == 1:
                 self.dynamic_shape.min_input_shape = {
                     "input_data1": [1],
-                    "input_data2": [1]
+                    "input_data2": [1],
                 }
                 self.dynamic_shape.max_input_shape = {
                     "input_data1": [128],
-                    "input_data2": [128]
+                    "input_data2": [128],
                 }
                 self.dynamic_shape.opt_input_shape = {
                     "input_data1": [32],
-                    "input_data2": [32]
+                    "input_data2": [32],
                 }
             elif self.dims == 2:
                 … (same trailing-comma change for the [1, 4] / [128, 256] /
                    [32, 64] dicts)
             elif self.dims == 3:
                 self.dynamic_shape.min_input_shape = {
                     "input_data1": [1, 4, 4],
-                    "input_data2": [1, 4, 4]
+                    "input_data2": [1, 4, 4],
                 }
                 self.dynamic_shape.max_input_shape = {
                     "input_data1": [128, 128, 256],
-                    "input_data2": [128, 128, 256]
+                    "input_data2": [128, 128, 256],
                 }
                 self.dynamic_shape.opt_input_shape = {
-                    "input_data1": [2, 64, 64],
-                    "input_data2": [2, 64, 64]
+                    "input_data1": [2, 32, 16],
+                    "input_data2": [2, 32, 16],
                 }
             elif self.dims == 4:
                 … (same trailing-comma change for the [1, 4, 4, 4] /
                    [8, 128, 64, 128] / [2, 64, 32, 32] dicts)

         def clear_dynamic_shape():

@@ -470,10 +499,12 @@ class TrtConvertElementwiseTest_two_input_without_broadcast(
         clear_dynamic_shape()
         self.trt_param.precision = paddle_infer.PrecisionType.Float32
         yield self.create_inference_config(), generate_trt_nodes_num(
-            attrs, False), (1e-5, 1e-5)
+            attrs, False
+        ), (1e-5, 1e-5)
         self.trt_param.precision = paddle_infer.PrecisionType.Half
         yield self.create_inference_config(), generate_trt_nodes_num(
-            attrs, False), (1e-3, 1e-3)
+            attrs, False
+        ), (1e-3, 1e-3)
         # for dynamic_shape
         generate_dynamic_shape(attrs)

@@ -491,7 +522,6 @@ class TrtConvertElementwiseTest_two_input_without_broadcast(
 class TrtConvertElementwiseTest_two_input_with_broadcast(TrtLayerAutoScanTest):
     def is_program_valid(self, program_config: ProgramConfig) -> bool:
         inputs = program_config.inputs
         if len(inputs['input_data1'].shape) != len(inputs['input_data2'].shape):

@@ -500,7 +530,6 @@ class TrtConvertElementwiseTest_two_input_with_broadcast(TrtLayerAutoScanTest):
         return True

     def sample_program_configs(self):
         def generate_input(shape):
             return np.random.random(shape).astype(np.float32)

@@ -512,8 +541,12 @@ class TrtConvertElementwiseTest_two_input_with_broadcast(TrtLayerAutoScanTest):
         input2_shape5_list = [[32], [2, 1, 32], [4, 1, 1, 32]]
         input2_shape6_list = [[1, 32], [1, 32], [1, 1, 1, 32]]
         input2_shape_list = [
-            input2_shape1_list, input2_shape2_list, input2_shape3_list,
-            input2_shape4_list, input2_shape5_list, input2_shape6_list
+            input2_shape1_list,
+            input2_shape2_list,
+            input2_shape3_list,
+            input2_shape4_list,
+            input2_shape5_list,
+            input2_shape6_list,
         ]
         axis1_list = [[-1], [1, -1], [1, -1]]
         axis2_list = [[-1], [0], [0]]

@@ -522,8 +555,12 @@ class TrtConvertElementwiseTest_two_input_with_broadcast(TrtLayerAutoScanTest):
         axis5_list = [[-1, 1], [-1, 0], [-1, 0]]
         axis6_list = [[-1, 0], [-1, 1], [-1, 0]]
         axis_list = [
-            axis1_list, axis2_list, axis3_list, axis4_list, axis5_list,
-            axis6_list
+            axis1_list,
+            axis2_list,
+            axis3_list,
+            axis4_list,
+            axis5_list,
+            axis6_list,
         ]

         for i in range(3):

@@ -531,66 +568,75 @@ class TrtConvertElementwiseTest_two_input_with_broadcast(TrtLayerAutoScanTest):
             for j in range(6):
                 input2_shape = input2_shape_list[j][i]
                 for op_type in [
                     ...
                     for axis in axis_list[j][i]:
                         self.shape1 = input1_shape
                         self.shape2 = input2_shape
                         dics = [{"axis": axis}]
                         … (same re-wrap of ops_config with "X": ["input_data1"] /
                            "Y": ["input_data2"], the two TensorConfig inputs and
                            outputs=["output_data"], as in the class above)
                         yield program_config

     def sample_predictor_configs(
-            self, program_config) -> (paddle_infer.Config, List[int], float):
+        self, program_config
+    ) -> (paddle_infer.Config, List[int], float):
         def generate_dynamic_shape(attrs):
-            max_shape = [[128], [128, 128], [128, 128, 128], [128, 128, 128, 128]]
+            max_shape = [
+                [128],
+                [128, 128],
+                [128, 128, 128],
+                [128, 128, 128, 128],
+            ]
             min_shape = [[1], [1, 1], [1, 1, 1], [1, 1, 1, 1]]
             opt_shape = [[32], [32, 32], [32, 32, 32], [32, 32, 32, 32]]

             self.dynamic_shape.min_input_shape = {
                 "input_data1": min_shape[len(self.shape1) - 1],
-                "input_data2": min_shape[len(self.shape2) - 1]
+                "input_data2": min_shape[len(self.shape2) - 1],
             }
             self.dynamic_shape.max_input_shape = {
                 "input_data1": max_shape[len(self.shape1) - 1],
-                "input_data2": max_shape[len(self.shape2) - 1]
+                "input_data2": max_shape[len(self.shape2) - 1],
             }
             self.dynamic_shape.opt_input_shape = {
                 "input_data1": opt_shape[len(self.shape1) - 1],
-                "input_data2": opt_shape[len(self.shape2) - 1]
+                "input_data2": opt_shape[len(self.shape2) - 1],
             }

         def clear_dynamic_shape():

@@ -626,12 +672,10 @@ class TrtConvertElementwiseTest_two_input_with_broadcast(TrtLayerAutoScanTest):
 class TrtConvertElementwiseTest_one_input_corner_case(TrtLayerAutoScanTest):
     def is_program_valid(self, program_config: ProgramConfig) -> bool:
         return True

     def sample_program_configs(self):
         def generate_input(shape):
             return np.random.random(shape).astype(np.float32)

@@ -640,52 +684,58 @@ class TrtConvertElementwiseTest_one_input_corner_case(TrtLayerAutoScanTest):
             return np.random.rand(32).astype(np.float32)

         for batch in [1, 2, 4]:
-            for shape in [[32], [batch, 32], [batch, 32, 32], [batch, 32, 16, 32]]:
+            for shape in [
+                [32],
+                [batch, 32],
+                [batch, 32, 32],
+                [batch, 32, 16, 32],
+            ]:
                 for op_type in [
                     ...
                     self.op_type = op_type
                     for axis in [-1 if len(shape) == 1 else 1]:
                         self.dims = len(shape)
                         dics = [{"axis": axis}]
-                        ops_config = [{
-                            "op_type": op_type,
-                            "op_inputs": {
-                                "X": ["weight"],
-                                "Y": ["input_data"]
-                            },
-                            "op_outputs": {
-                                "Out": ["output_data"]
-                            },
-                            "op_attrs": dics[0]
-                        }]
+                        ops_config = [
+                            {
+                                "op_type": op_type,
+                                "op_inputs": {
+                                    "X": ["weight"],
+                                    "Y": ["input_data"],
+                                },
+                                "op_outputs": {"Out": ["output_data"]},
+                                "op_attrs": dics[0],
+                            }
+                        ]
                         ops = self.generate_op_config(ops_config)
                         program_config = ProgramConfig(
                             ops=ops,
                             weights={
-                                "weight": TensorConfig(
-                                    data_gen=partial(generate_weight))
+                                "weight": TensorConfig(
+                                    data_gen=partial(generate_weight)
+                                )
                             },
                             inputs={
-                                "input_data": TensorConfig(
-                                    data_gen=partial(generate_input, shape)),
+                                "input_data": TensorConfig(
+                                    data_gen=partial(generate_input, shape)
+                                ),
                             },
-                            outputs=["output_data"])
+                            outputs=["output_data"],
+                        )
                         yield program_config

     def sample_predictor_configs(
-            self, program_config) -> (paddle_infer.Config, List[int], float):
+        self, program_config
+    ) -> (paddle_infer.Config, List[int], float):
         def generate_dynamic_shape(attrs):
             # The input.dims[1] must be equal to the weight's length.
             if self.dims == 1:
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_equal.py View file @ dbe08e9b
...
...
@@ -22,7 +22,6 @@ from typing import Optional, List, Callable, Dict, Any, Set


class TrtConvertElementwiseTest_one_input_corner_case(TrtLayerAutoScanTest):
    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        attrs = [
            program_config.ops[i].attrs for i in range(len(program_config.ops))
...
...
@@ -35,7 +34,6 @@ class TrtConvertElementwiseTest_one_input_corner_case(TrtLayerAutoScanTest):
        return True

    def sample_program_configs(self):
        def generate_input(shape):
            return np.random.random(shape).astype(np.float32)
...
...
@@ -44,86 +42,84 @@ class TrtConvertElementwiseTest_one_input_corner_case(TrtLayerAutoScanTest):
            for axis in [-1 if len(shape) == 1 else 1]:
                self.dims = len(shape)
                dics = [{"axis": axis}, {"in_dtype": 0, "out_dtype": 5}]
                ops_config = [
                    {
                        "op_type": "equal",
                        "op_inputs": {
                            "X": ["input_data1"],
                            "Y": ["input_data2"],
                        },
                        "op_outputs": {"Out": ["compare_output_data"]},
                        "op_attrs": dics[0],
                    },
                    {
                        "op_type": "cast",
                        "op_inputs": {"X": ["compare_output_data"]},
                        "op_outputs": {"Out": ["output_data"]},
                        "op_attrs": dics[1],
                    },
                ]
                ops = self.generate_op_config(ops_config)
                program_config = ProgramConfig(
                    ops=ops,
                    weights={},
                    inputs={
                        "input_data1": TensorConfig(
                            data_gen=partial(generate_input, shape)
                        ),
                        "input_data2": TensorConfig(
                            data_gen=partial(generate_input, shape)
                        ),
                    },
                    outputs=["output_data"],
                )

                yield program_config

    def sample_predictor_configs(
        self, program_config
    ) -> (paddle_infer.Config, List[int], float):
        def generate_dynamic_shape(attrs):
            # The input.dims[1] must be equal to the weight's length.
            if self.dims == 2:
                self.dynamic_shape.min_input_shape = {
                    "input_data1": [1, 1],
                    "input_data2": [1, 1],
                }
                self.dynamic_shape.max_input_shape = {
                    "input_data1": [4, 1],
                    "input_data2": [4, 1],
                }
                self.dynamic_shape.opt_input_shape = {
                    "input_data1": [2, 1],
                    "input_data2": [2, 1],
                }
            elif self.dims == 3:
                self.dynamic_shape.min_input_shape = {
                    "input_data1": [1, 1, 4],
                    "input_data2": [1, 1, 4],
                }
                self.dynamic_shape.max_input_shape = {
                    "input_data1": [4, 1, 256],
                    "input_data2": [1, 1, 256],
                }
                self.dynamic_shape.opt_input_shape = {
                    "input_data1": [2, 1, 16],
                    "input_data2": [2, 1, 16],
                }
            elif self.dims == 4:
                self.dynamic_shape.min_input_shape = {
                    "input_data1": [1, 1, 4, 4],
                    "input_data2": [1, 1, 4, 4],
                }
                self.dynamic_shape.max_input_shape = {
                    "input_data1": [4, 1, 128, 256],
                    "input_data2": [4, 1, 128, 256],
                }
                self.dynamic_shape.opt_input_shape = {
                    "input_data1": [2, 1, 32, 16],
                    "input_data2": [2, 1, 32, 16],
                }

        def clear_dynamic_shape():
...
...
@@ -144,19 +140,23 @@ class TrtConvertElementwiseTest_one_input_corner_case(TrtLayerAutoScanTest):
        clear_dynamic_shape()
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, False
        ), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, False
        ), 1e-3

        # for dynamic_shape
        generate_dynamic_shape(attrs)
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, True
        ), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, True
        ), 1e-3

    def test(self):
        self.run_test()
...
...
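A note on the `equal` test above: `equal` produces a boolean tensor, so the program appends a `cast` op with `"in_dtype": 0` and `"out_dtype": 5`; those integers follow Paddle's VarType enum (0 for BOOL, 5 for FP32, quoted here from memory of the enum rather than from this diff). A minimal NumPy sketch of the same two-op computation:

import numpy as np

x = np.random.random([2, 1]).astype(np.float32)
y = x.copy()

compare = np.equal(x, y)             # the "equal" op: boolean output
output = compare.astype(np.float32)  # the trailing "cast": bool -> float32
print(output)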
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_fc.py View file @ dbe08e9b
...
...
@@ -23,10 +23,9 @@ import os


class TrtConvertFcTest(TrtLayerAutoScanTest):
    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        # The output has diff between gpu and trt in CI windows
        if os.name == 'nt':
            return False
        return True
...
...
@@ -34,12 +33,14 @@ class TrtConvertFcTest(TrtLayerAutoScanTest):
        self.trt_param.workspace_size = 1073741824

        def generate_input1(batch, attrs: List[Dict[str, Any]]):
            return np.random.random(
                [batch, 3, 64, (int)(attrs[0]["m"] / 2), 2]
            ).astype(np.float32)

        def generate_w(batch, attrs: List[Dict[str, Any]]):
            return np.random.random([attrs[0]["m"], attrs[0]["n"]]).astype(np.float32)

        def generate_bias(batch, attrs: List[Dict[str, Any]]):
            return np.random.random([attrs[0]["n"]]).astype(np.float32)
...
...
@@ -53,7 +54,7 @@ class TrtConvertFcTest(TrtLayerAutoScanTest):
                    "m": m,
                    "n": n,
                },
                {},
            ]

            ops_config = [
...
...
@@ -62,12 +63,10 @@ class TrtConvertFcTest(TrtLayerAutoScanTest):
                    "op_inputs": {
                        "Input": ["input_data"],
                        "W": ["w_data"],
                        "Bias": ["bias_data"],
                    },
                    "op_outputs": {"Out": ["output_data"]},
                    "op_attrs": dics[0],
                },
            ]
...
...
@@ -76,24 +75,26 @@ class TrtConvertFcTest(TrtLayerAutoScanTest):
            program_config = ProgramConfig(
                ops=ops,
                weights={
                    "w_data": TensorConfig(
                        data_gen=partial(generate_w, batch, dics)
                    ),
                    "bias_data": TensorConfig(
                        data_gen=partial(generate_bias, batch, dics)
                    ),
                },
                inputs={
                    "input_data": TensorConfig(
                        data_gen=partial(generate_input1, batch, dics)
                    ),
                },
                outputs=["output_data"],
            )

            yield program_config

    def sample_predictor_configs(
        self, program_config
    ) -> (paddle_infer.Config, List[int], float):
        def generate_dynamic_shape(attrs):
            self.dynamic_shape.min_input_shape = {
                "input_data": [1, 3, 32, 16, 2],
...
...
@@ -121,19 +122,23 @@ class TrtConvertFcTest(TrtLayerAutoScanTest):
        # clear_dynamic_shape()
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, False
        ), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, False
        ), (1e-3, 1e-3)

        # for dynamic_shape
        generate_dynamic_shape(attrs)
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, True
        ), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, True
        ), (1e-3, 1e-3)

    def test(self):
        self.run_test()
...
...
@@ -143,10 +148,9 @@ class TrtConvertFcTest(TrtLayerAutoScanTest):

class TrtConvertFcTest2(TrtLayerAutoScanTest):
    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        # The output has diff between gpu and trt in CI windows
        if os.name == 'nt':
            return False
        return True
...
...
@@ -157,8 +161,9 @@ class TrtConvertFcTest2(TrtLayerAutoScanTest):
            return np.random.random([batch, 3, 64, 14]).astype(np.float32)

        def generate_w(batch, attrs: List[Dict[str, Any]]):
            return np.random.random([attrs[0]["m"], attrs[0]["n"]]).astype(np.float32)

        def generate_bias(batch, attrs: List[Dict[str, Any]]):
            return np.random.random([attrs[0]["n"]]).astype(np.float32)
...
...
@@ -172,7 +177,7 @@ class TrtConvertFcTest2(TrtLayerAutoScanTest):
                    "m": m,
                    "n": n,
                },
                {},
            ]

            ops_config = [
...
...
@@ -181,12 +186,10 @@ class TrtConvertFcTest2(TrtLayerAutoScanTest):
                    "op_inputs": {
                        "Input": ["input_data"],
                        "W": ["w_data"],
                        "Bias": ["bias_data"],
                    },
                    "op_outputs": {"Out": ["output_data"]},
                    "op_attrs": dics[0],
                },
            ]
...
...
@@ -195,24 +198,26 @@ class TrtConvertFcTest2(TrtLayerAutoScanTest):
            program_config = ProgramConfig(
                ops=ops,
                weights={
                    "w_data": TensorConfig(
                        data_gen=partial(generate_w, batch, dics)
                    ),
                    "bias_data": TensorConfig(
                        data_gen=partial(generate_bias, batch, dics)
                    ),
                },
                inputs={
                    "input_data": TensorConfig(
                        data_gen=partial(generate_input1, batch, dics)
                    ),
                },
                outputs=["output_data"],
            )

            yield program_config

    def sample_predictor_configs(
        self, program_config
    ) -> (paddle_infer.Config, List[int], float):
        def generate_dynamic_shape():
            self.dynamic_shape.min_input_shape = {
                "input_data": [1, 3, 32, 14],
...
...
@@ -234,14 +239,14 @@ class TrtConvertFcTest2(TrtLayerAutoScanTest):
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), (1, 2), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), (1, 2), (1e-3, 1e-3)

        # for dynamic_shape
        generate_dynamic_shape()
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), (1, 2), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), (1, 2), (1e-3, 1e-3)

    def test(self):
        self.run_test()
...
...
@@ -277,7 +282,7 @@ class TrtConvertFcTest3(TrtLayerAutoScanTest):
                    "m": m,
                    "n": n,
                },
                {},
            ]

            ops_config = [
...
...
@@ -286,12 +291,10 @@ class TrtConvertFcTest3(TrtLayerAutoScanTest):
                    "op_inputs": {
                        "Input": ["input_data"],
                        "W": ["w_data"],
                        "Bias": ["bias_data"],
                    },
                    "op_outputs": {"Out": ["output_data"]},
                    "op_attrs": dics[0],
                },
            ]
...
...
@@ -300,24 +303,26 @@ class TrtConvertFcTest3(TrtLayerAutoScanTest):
            program_config = ProgramConfig(
                ops=ops,
                weights={
                    "w_data": TensorConfig(
                        data_gen=partial(generate_w, batch, dics)
                    ),
                    "bias_data": TensorConfig(
                        data_gen=partial(generate_bias, batch, dics)
                    ),
                },
                inputs={
                    "input_data": TensorConfig(
                        data_gen=partial(generate_input1, batch, dics)
                    ),
                },
                outputs=["output_data"],
            )

            yield program_config

    def sample_predictor_configs(
        self, program_config
    ) -> (paddle_infer.Config, List[int], float):
        def generate_dynamic_shape():
            self.dynamic_shape.min_input_shape = {
                "input_data": [1, 14, 1, 2],
...
...
@@ -339,16 +344,16 @@ class TrtConvertFcTest3(TrtLayerAutoScanTest):
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), (1, 2), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), (1, 2), (1e-3, 1e-3)

        # for dynamic_shape
        generate_dynamic_shape()
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), (1, 2), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), (1, 2), (1e-3, 1e-3)
        self.trt_param.precision = paddle_infer.PrecisionType.Int8
        yield self.create_inference_config(), (1, 2), (1e-3, 1e-3)

    def test(self):
        self.run_test()
...
...
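Many hunks in this commit also relax the FP16 comparison tolerance from 1e-5 to 1e-3, or to the pair (1e-3, 1e-3) where the harness takes separate absolute and relative tolerances, since half precision carries only about three decimal digits of mantissa. Below is a small sketch of the kind of check those yielded tolerances feed; the helper name is hypothetical, not the harness API.

import numpy as np


def outputs_close(baseline, trt_result, atol=1e-3, rtol=1e-3):
    # FP16 runs are compared with a looser tolerance than FP32 (1e-5).
    return np.allclose(baseline, trt_result, atol=atol, rtol=rtol)


fp32_ref = np.array([0.12345, 1.00001], dtype=np.float32)
fp16_out = fp32_ref.astype(np.float16).astype(np.float32)
print(outputs_close(fp32_ref, fp16_out))  # True under the relaxed tolerance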
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_fill_constant.py View file @ dbe08e9b
...
...
@@ -22,12 +22,10 @@ from typing import Optional, List, Callable, Dict, Any, Set


class TrtConvertSplitTest(TrtLayerAutoScanTest):
    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        return True

    def sample_program_configs(self):
        def generate_value_data(attrs: List[Dict[str, Any]]):
            return np.array([1]).astype(np.int32)
...
...
@@ -47,21 +45,28 @@ class TrtConvertSplitTest(TrtLayerAutoScanTest):
                str_value = str_value
            else:
                str_value = ""
            dics = [
                {
                    "str_value": str_value,
                    "value": value,
                    "shape": shape,
                    "dtype": dtype,
                },
                {"axis": -1},
            ]
            dics_intput = [
                {"ValueTensor": ["value_data"]},
                {
                    "ShapeTensor": ["shape_data"],
                },
                {
                    "ShapeTensorList": [
                        "shapeT1_data",
                        "shapeT2_data",
                    ],
                },
                {},
            ]
            ops_config = [
                {
                    "op_type": "fill_constant",
...
...
@@ -69,7 +74,7 @@ class TrtConvertSplitTest(TrtLayerAutoScanTest):
                    "op_outputs": {
                        "Out": ["out_data"],
                    },
                    "op_attrs": dics[0],
                },
            ]
...
...
@@ -81,26 +86,31 @@ class TrtConvertSplitTest(TrtLayerAutoScanTest):
                ops=ops,
                weights={},
                inputs={
                    "value_data": TensorConfig(
                        data_gen=partial(generate_value_data, dics)
                    ),
                    "shape_data": TensorConfig(
                        data_gen=partial(generate_shape_data, dics)
                    ),
                    "shapeT1_data": TensorConfig(
                        data_gen=partial(generate_shapelist_data, dics)
                    ),
                    "shapeT2_data": TensorConfig(
                        data_gen=partial(generate_shapelist_data, dics)
                    ),
                },
                outputs=["out_data"],
            )

            yield program_config

    def sample_predictor_configs(
        self, program_config
    ) -> (paddle_infer.Config, List[int], float):
        def generate_dynamic_shape(attrs):
            self.input_shape = [1, 1]
            max_shape = list(self.input_shape)
...
...
@@ -118,7 +128,7 @@ class TrtConvertSplitTest(TrtLayerAutoScanTest):
            self.dynamic_shape.opt_input_shape = {}

        def generate_trt_nodes_num(attrs, dynamic_shape):
            if self.num_input < 3:
                return 0, 6
            return 1, 5
...
...
@@ -131,10 +141,12 @@ class TrtConvertSplitTest(TrtLayerAutoScanTest):
        generate_dynamic_shape(attrs)
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, True
        ), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, True
        ), 1e-3

    def add_skip_trt_case(self):
        pass
...
...
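In the fill_constant test above, `generate_trt_nodes_num` returns (0, 6) when `self.num_input < 3` and (1, 5) otherwise; reading the pair as (TensorRT engine ops, remaining Paddle ops) is my interpretation of the harness convention, not something stated in this diff. A standalone sketch with the counts copied from the hunk and a hypothetical function name:

def expected_trt_nodes(num_input):
    # Counts copied from the hunk above: no TRT engine op in the first case,
    # one TRT engine op (and one fewer Paddle op) in the second.
    if num_input < 3:
        return 0, 6
    return 1, 5


print(expected_trt_nodes(2))  # (0, 6)
print(expected_trt_nodes(3))  # (1, 5)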
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_flatten.py View file @ dbe08e9b
...
...
@@ -22,16 +22,14 @@ from typing import Optional, List, Callable, Dict, Any, Set


class TrtConvertFlattenTest_dim_2(TrtLayerAutoScanTest):
    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        return True

    def sample_program_configs(self):
        def generate_input(batch):
            return np.random.random([batch, 32]).astype(np.float32)

        for batch in [1, 4]:
            for axis in [0, 1]:
                for type in ["flatten", "flatten2"]:
                    if type == "flatten":
...
...
@@ -39,34 +37,35 @@ class TrtConvertFlattenTest_dim_2(TrtLayerAutoScanTest):
                    else:
                        op_outputs = {
                            "Out": ["output_data"],
                            "XShape": ["xshape_data"],
                        }
                    dics = [{"axis": axis}]
                    ops_config = [
                        {
                            "op_type": "flatten",
                            "op_inputs": {"X": ["input_data"]},
                            "op_outputs": op_outputs,
                            "op_attrs": dics[0],
                        }
                    ]
                    ops = self.generate_op_config(ops_config)
                    program_config = ProgramConfig(
                        ops=ops,
                        weights={},
                        inputs={
                            "input_data": TensorConfig(
                                data_gen=partial(generate_input, batch)
                            )
                        },
                        outputs=["output_data"],
                    )

                    yield program_config

    def sample_predictor_configs(
        self, program_config
    ) -> (paddle_infer.Config, List[int], float):
        def generate_dynamic_shape(attrs):
            self.dynamic_shape.min_input_shape = {"input_data": [1, 8]}
            self.dynamic_shape.max_input_shape = {"input_data": [4, 64]}
...
...
@@ -100,35 +99,37 @@ class TrtConvertFlattenTest_dim_2(TrtLayerAutoScanTest):
        # for static_shape
        clear_dynamic_shape()
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, False
        ), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, False
        ), (1e-3, 1e-3)

        # for dynamic_shape
        generate_dynamic_shape(attrs)
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, True
        ), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, True
        ), (1e-3, 1e-3)

    def test(self):
        self.run_test()


class TrtConvertFlattenTest_dim_3(TrtLayerAutoScanTest):
    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        return True

    def sample_program_configs(self):
        def generate_input(batch):
            return np.random.random([batch, 32, 64]).astype(np.float32)

        for batch in [1, 4]:
            for axis in [0, 1, 2]:
                for type in ["flatten", "flatten2"]:
                    if type == "flatten":
...
...
@@ -136,38 +137,39 @@ class TrtConvertFlattenTest_dim_3(TrtLayerAutoScanTest):
                    else:
                        op_outputs = {
                            "Out": ["output_data"],
                            "XShape": ["xshape_data"],
                        }
                    dics = [{"axis": axis}]
                    ops_config = [
                        {
                            "op_type": "flatten",
                            "op_inputs": {"X": ["input_data"]},
                            "op_outputs": op_outputs,
                            "op_attrs": dics[0],
                        }
                    ]
                    ops = self.generate_op_config(ops_config)
                    program_config = ProgramConfig(
                        ops=ops,
                        weights={},
                        inputs={
                            "input_data": TensorConfig(
                                data_gen=partial(generate_input, batch)
                            )
                        },
                        outputs=["output_data"],
                    )

                    yield program_config

    def sample_predictor_configs(
        self, program_config
    ) -> (paddle_infer.Config, List[int], float):
        def generate_dynamic_shape(attrs):
            self.dynamic_shape.min_input_shape = {"input_data": [1, 8, 8]}
            self.dynamic_shape.max_input_shape = {"input_data": [4, 32, 64]}
            self.dynamic_shape.opt_input_shape = {"input_data": [2, 32, 64]}

        def clear_dynamic_shape():
            self.dynamic_shape.max_input_shape = {}
...
...
@@ -198,35 +200,37 @@ class TrtConvertFlattenTest_dim_3(TrtLayerAutoScanTest):
        clear_dynamic_shape()
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, False
        ), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, False
        ), (1e-3, 1e-3)

        # for dynamic_shape
        generate_dynamic_shape(attrs)
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, True
        ), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, True
        ), (1e-3, 1e-3)

    def test(self):
        self.run_test()


class TrtConvertFlattenTest_dim_4(TrtLayerAutoScanTest):
    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        return True

    def sample_program_configs(self):
        def generate_input(batch):
            return np.random.random([batch, 8, 8, 8]).astype(np.float32)

        for batch in [1, 4]:
            for axis in [0, 1, 2, 3]:
                for type in ["flatten", "flatten2"]:
                    if type == "flatten":
...
...
@@ -234,37 +238,38 @@ class TrtConvertFlattenTest_dim_4(TrtLayerAutoScanTest):
                    else:
                        op_outputs = {
                            "Out": ["output_data"],
                            "XShape": ["xshape_data"],
                        }
                    dics = [{"axis": axis}]
                    ops_config = [
                        {
                            "op_type": "flatten",
                            "op_inputs": {"X": ["input_data"]},
                            "op_outputs": op_outputs,
                            "op_attrs": dics[0],
                        }
                    ]
                    ops = self.generate_op_config(ops_config)
                    program_config = ProgramConfig(
                        ops=ops,
                        weights={},
                        inputs={
                            "input_data": TensorConfig(
                                data_gen=partial(generate_input, batch)
                            )
                        },
                        outputs=["output_data"],
                    )

                    yield program_config

    def sample_predictor_configs(
        self, program_config
    ) -> (paddle_infer.Config, List[int], float):
        def generate_dynamic_shape(attrs):
            self.dynamic_shape.min_input_shape = {"input_data": [1, 4, 4, 4]}
            self.dynamic_shape.max_input_shape = {"input_data": [4, 32, 32, 32]}
            self.dynamic_shape.opt_input_shape = {"input_data": [2, 16, 16, 8]}

        def clear_dynamic_shape():
...
...
@@ -294,36 +299,39 @@ class TrtConvertFlattenTest_dim_4(TrtLayerAutoScanTest):
        # for static_shape
        clear_dynamic_shape()
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, False
        ), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, False
        ), (1e-3, 1e-3)

        # for dynamic_shape
        generate_dynamic_shape(attrs)
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, True
        ), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, True
        ), (1e-3, 1e-3)

    def test(self):
        self.run_test()


class TrtConvertFlattenTest_dim_5(TrtLayerAutoScanTest):
    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        return True

    def sample_program_configs(self):
        def generate_input(batch):
            return np.random.random([batch, 8, 8, 8]).astype(np.float32)

        for batch in [1, 4]:
            for axis in [0, 1, 2, 3, 4]:
                for type in ["flatten", "flatten2"]:
                    if type == "flatten":
...
...
@@ -331,37 +339,38 @@ class TrtConvertFlattenTest_dim_5(TrtLayerAutoScanTest):
                    else:
                        op_outputs = {
                            "Out": ["output_data"],
                            "XShape": ["xshape_data"],
                        }
                    dics = [{"axis": axis}]
                    ops_config = [
                        {
                            "op_type": "flatten",
                            "op_inputs": {"X": ["input_data"]},
                            "op_outputs": op_outputs,
                            "op_attrs": dics[0],
                        }
                    ]
                    ops = self.generate_op_config(ops_config)
                    program_config = ProgramConfig(
                        ops=ops,
                        weights={},
                        inputs={
                            "input_data": TensorConfig(
                                data_gen=partial(generate_input, batch)
                            )
                        },
                        outputs=["output_data"],
                    )

                    yield program_config

    def sample_predictor_configs(
        self, program_config
    ) -> (paddle_infer.Config, List[int], float):
        def generate_dynamic_shape(attrs):
            self.dynamic_shape.min_input_shape = {"input_data": [1, 4, 4, 4]}
            self.dynamic_shape.max_input_shape = {"input_data": [4, 16, 16, 8]}
            self.dynamic_shape.opt_input_shape = {"input_data": [2, 16, 16, 8]}

        def clear_dynamic_shape():
...
...
@@ -391,20 +400,25 @@ class TrtConvertFlattenTest_dim_5(TrtLayerAutoScanTest):
        # for static_shape
        clear_dynamic_shape()
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, False
        ), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, False
        ), (1e-3, 1e-3)

        # for dynamic_shape
        generate_dynamic_shape(attrs)
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, True
        ), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, True
        ), (1e-3, 1e-3)

    def test(self):
        self.run_test()
...
...
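The flatten tests above sweep `axis` over every legal value for each input rank; Paddle's `flatten` with `axis=k` collapses dimensions [0, k) into the first output dimension and [k, ndim) into the second. A short NumPy sketch of that behaviour, independent of the test harness:

import numpy as np


def flatten_like_paddle(x, axis):
    # Dims before `axis` form the first output dim, the rest form the second
    # (the empty product for axis=0 is 1, matching the op's semantics).
    lead = int(np.prod(x.shape[:axis]))
    tail = int(np.prod(x.shape[axis:]))
    return x.reshape(lead, tail)


x = np.zeros([2, 8, 8, 8], dtype=np.float32)
print(flatten_like_paddle(x, 2).shape)  # (16, 64)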
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gather.py View file @ dbe08e9b
...
...
@@ -23,7 +23,6 @@ import unittest


class TrtConvertGatherTest(TrtLayerAutoScanTest):
    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        inputs = program_config.inputs
        attrs = [
...
...
@@ -35,7 +34,6 @@ class TrtConvertGatherTest(TrtLayerAutoScanTest):
        return True

    def sample_program_configs(self):
        def generate_input1(shape):
            return np.random.random(shape).astype(np.float32)
...
...
@@ -52,112 +50,126 @@ class TrtConvertGatherTest(TrtLayerAutoScanTest):
            for index in [[1, 4], [4, 8]]:
                for axis in [0, 1, 2, 3]:
                    for overwrite in [True, False]:
                        for input in [
                            {"X": ["input_data"], "Index": ["index_data"]},
                            {
                                "X": ["input_data"],
                                "Index": ["index_data"],
                                "Axis": ["axis_data"],
                            },
                        ]:
                            for index_type_int32 in [True, False]:
                                self.shape = shape
                                self.axis = axis
                                self.input_num = len(input)
                                self.index_type_int32 = index_type_int32
                                dics = [{"overwrite": overwrite, "axis": axis}]
                                ops_config = [
                                    {
                                        "op_type": "gather",
                                        "op_inputs": input,
                                        "op_outputs": {"Out": ["output_data"]},
                                        "op_attrs": dics[0],
                                    }
                                ]
                                ops = self.generate_op_config(ops_config)
                                program_config = ProgramConfig(
                                    ops=ops,
                                    weights={},
                                    inputs={
                                        "input_data": TensorConfig(
                                            data_gen=partial(generate_input1, shape)
                                        ),
                                        "index_data": TensorConfig(
                                            data_gen=partial(
                                                generate_input2
                                                if index_type_int32 == True
                                                else generate_input4,
                                                index,
                                            )
                                        ),
                                    }
                                    if len(input) == 2
                                    else {
                                        "input_data": TensorConfig(
                                            data_gen=partial(generate_input1, shape)
                                        ),
                                        "index_data": TensorConfig(
                                            data_gen=partial(generate_input2, index)
                                        ),
                                        "axis_data": TensorConfig(
                                            data_gen=partial(generate_input3, axis)
                                        ),
                                    },
                                    outputs=["output_data"],
                                )

                                yield program_config

    def sample_predictor_configs(
        self, program_config
    ) -> (paddle_infer.Config, List[int], float):
        def generate_dynamic_shape(attrs):
            if len(self.shape) == 1:
                self.dynamic_shape.min_input_shape = {
                    "input_data": [4],
                    "index_data": [1],
                }
                self.dynamic_shape.max_input_shape = {
                    "input_data": [128],
                    "index_data": [4],
                }
                self.dynamic_shape.opt_input_shape = {
                    "input_data": [16],
                    "index_data": [2],
                }
            elif len(self.shape) == 2:
                self.dynamic_shape.min_input_shape = {
                    "input_data": [2, 4],
                    "index_data": [1],
                }
                self.dynamic_shape.max_input_shape = {
                    "input_data": [256, 256],
                    "index_data": [4],
                }
                self.dynamic_shape.opt_input_shape = {
                    "input_data": [64, 32],
                    "index_data": [2],
                }
            elif len(self.shape) == 3:
                self.dynamic_shape.min_input_shape = {
                    "input_data": [2, 4, 4],
                    "index_data": [1],
                }
                self.dynamic_shape.max_input_shape = {
                    "input_data": [128, 256, 256],
                    "index_data": [4],
                }
                self.dynamic_shape.opt_input_shape = {
                    "input_data": [16, 64, 32],
                    "index_data": [2],
                }
            elif len(self.shape) == 4:
                self.dynamic_shape.min_input_shape = {
                    "input_data": [2, 4, 4, 2],
                    "index_data": [1],
                }
                self.dynamic_shape.max_input_shape = {
                    "input_data": [128, 256, 64, 128],
                    "index_data": [4],
                }
                self.dynamic_shape.opt_input_shape = {
                    "input_data": [16, 64, 16, 32],
                    "index_data": [2],
                }

        def clear_dynamic_shape():
...
...
@@ -182,10 +194,12 @@ class TrtConvertGatherTest(TrtLayerAutoScanTest):
        clear_dynamic_shape()
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), generate_trt_nodes_num(
            False
        ), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), generate_trt_nodes_num(
            False
        ), 1e-3

        # for dynamic_shape
        generate_dynamic_shape(attrs)
...
...
@@ -201,14 +215,17 @@ class TrtConvertGatherTest(TrtLayerAutoScanTest):
        def teller1(program_config, predictor_config):
            if len(self.dynamic_shape.min_input_shape) != 0:
                inputs = program_config.inputs
                if (
                    len(inputs['input_data'].shape) == 1
                    or len(inputs['index_data'].shape) == 1
                ):
                    return True
            return False

        self.add_skip_case(
            teller1,
            SkipReasons.TRT_NOT_SUPPORT,
            "Need to repair the case: trt reshape out failed for dynamic shape mode when inputs' dims==1. under trt7.0 ",
        )

    def test(self):
...
...
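The gather test registers a `teller` predicate through `add_skip_case`, so known-bad combinations (here, 1-D inputs under dynamic shape on TRT 7.0) are skipped instead of reported as failures. Below is a stripped-down sketch of the same registration pattern; the class is illustrative and not the real auto-scan machinery.

class SkipRuleDemo:
    """Illustrative stand-in for the skip-case registry used by the tests."""

    def __init__(self):
        self.skip_rules = []

    def add_skip_case(self, teller, reason):
        # Each rule pairs a predicate with a human-readable reason.
        self.skip_rules.append((teller, reason))

    def should_skip(self, case):
        return any(teller(case) for teller, _ in self.skip_rules)


demo = SkipRuleDemo()
demo.add_skip_case(lambda case: case.get("dims") == 1, "1-D dynamic shape unsupported")
print(demo.should_skip({"dims": 1}))  # True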
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gather_nd.py View file @ dbe08e9b
...
...
@@ -23,7 +23,6 @@ import os


class TrtConvertGatherNdTest_dim_4_1(TrtLayerAutoScanTest):
    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        # The output has diff between gpu and trt in CI windows
        # if ( and self.trt_param.precision == paddle_infer.PrecisionType.Half):
...
...
@@ -31,54 +30,53 @@ class TrtConvertGatherNdTest_dim_4_1(TrtLayerAutoScanTest):
        return True

    def sample_program_configs(self):
        def generate_input1():
            return np.random.random([2, 32, 64, 64]).astype(np.float32)

        def generate_input2():
            return np.ones([1]).astype(np.int32)

        ops_config = [
            {
                "op_type": "gather_nd",
                "op_inputs": {"X": ["input_data"], "Index": ["index_data"]},
                "op_outputs": {"Out": ["output_data"]},
                "op_attrs": {},
            }
        ]
        ops = self.generate_op_config(ops_config)

        for i in range(10):
            program_config = ProgramConfig(
                ops=ops,
                weights={},
                inputs={
                    "input_data": TensorConfig(
                        data_gen=partial(generate_input1)
                    ),
                    "index_data": TensorConfig(
                        data_gen=partial(generate_input2)
                    ),
                },
                outputs=["output_data"],
            )

            yield program_config

    def sample_predictor_configs(
        self, program_config
    ) -> (paddle_infer.Config, List[int], float):
        def generate_dynamic_shape(attrs):
            self.dynamic_shape.min_input_shape = {
                "input_data": [1, 8, 8, 8],
                "index_data": [1],
            }
            self.dynamic_shape.max_input_shape = {
                "input_data": [4, 32, 64, 64],
                "index_data": [1],
            }
            self.dynamic_shape.opt_input_shape = {
                "input_data": [2, 32, 64, 64],
                "index_data": [1],
            }

        def clear_dynamic_shape():
...
...
@@ -95,25 +93,26 @@ class TrtConvertGatherNdTest_dim_4_1(TrtLayerAutoScanTest):
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), (0, 4), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), (0, 4), 1e-3

        # for dynamic_shape
        generate_dynamic_shape(attrs)
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), (1, 3), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), (1, 3), 1e-3

    def add_skip_trt_case(self):
        def teller1(program_config, predictor_config):
            if len(self.dynamic_shape.min_input_shape) != 0 and os.name == 'nt':
                return True
            return False

        self.add_skip_case(
            teller1,
            SkipReasons.TRT_NOT_SUPPORT,
            "Under Windows Ci, this case will sporadically fail.",
        )

    def test(self):
        self.add_skip_trt_case()
...
...
@@ -121,29 +120,24 @@ class TrtConvertGatherNdTest_dim_4_1(TrtLayerAutoScanTest):

class TrtConvertGatherNdTest_dim_4_1_2(TrtLayerAutoScanTest):
    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        return True

    def sample_program_configs(self):
        def generate_input1():
            return np.random.random([2, 32, 64, 64]).astype(np.float32)

        def generate_input2():
            return np.array([1, 2]).astype(np.int32)

        ops_config = [
            {
                "op_type": "gather_nd",
                "op_inputs": {"X": ["input_data"], "Index": ["index_data"]},
                "op_outputs": {"Out": ["output_data"]},
                "op_attrs": {},
            }
        ]
        ops = self.generate_op_config(ops_config)

        program_config = ProgramConfig(
...
...
@@ -153,25 +147,26 @@ class TrtConvertGatherNdTest_dim_4_1_2(TrtLayerAutoScanTest):
                "input_data": TensorConfig(data_gen=partial(generate_input1)),
                "index_data": TensorConfig(data_gen=partial(generate_input2)),
            },
            outputs=["output_data"],
        )

        yield program_config

    def sample_predictor_configs(
        self, program_config
    ) -> (paddle_infer.Config, List[int], float):
        def generate_dynamic_shape(attrs):
            self.dynamic_shape.min_input_shape = {
                "input_data": [1, 8, 8, 8],
                "index_data": [2],
            }
            self.dynamic_shape.max_input_shape = {
                "input_data": [4, 32, 64, 64],
                "index_data": [2],
            }
            self.dynamic_shape.opt_input_shape = {
                "input_data": [2, 32, 64, 64],
                "index_data": [2],
            }

        def clear_dynamic_shape():
...
...
@@ -188,25 +183,26 @@ class TrtConvertGatherNdTest_dim_4_1_2(TrtLayerAutoScanTest):
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), (0, 4), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), (0, 4), 1e-3

        # for dynamic_shape
        generate_dynamic_shape(attrs)
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), (1, 3), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), (1, 3), 1e-3

    def add_skip_trt_case(self):
        def teller1(program_config, predictor_config):
            if len(self.dynamic_shape.min_input_shape) != 0 and os.name == 'nt':
                return True
            return False

        self.add_skip_case(
            teller1,
            SkipReasons.TRT_NOT_SUPPORT,
            "Under Windows Ci, this case will sporadically fail.",
        )

    def test(self):
        self.add_skip_trt_case()
...
...
@@ -214,29 +210,24 @@ class TrtConvertGatherNdTest_dim_4_1_2(TrtLayerAutoScanTest):

class TrtConvertGatherNdTest_dim_4_2(TrtLayerAutoScanTest):
    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        return True

    def sample_program_configs(self):
        def generate_input1():
            return np.random.random([2, 32, 64, 64]).astype(np.float32)

        def generate_input2():
            return np.ones([2, 2]).astype(np.int32)

        ops_config = [
            {
                "op_type": "gather_nd",
                "op_inputs": {"X": ["input_data"], "Index": ["index_data"]},
                "op_outputs": {"Out": ["output_data"]},
                "op_attrs": {},
            }
        ]
        ops = self.generate_op_config(ops_config)

        program_config = ProgramConfig(
...
...
@@ -246,25 +237,26 @@ class TrtConvertGatherNdTest_dim_4_2(TrtLayerAutoScanTest):
                "input_data": TensorConfig(data_gen=partial(generate_input1)),
                "index_data": TensorConfig(data_gen=partial(generate_input2)),
            },
            outputs=["output_data"],
        )

        yield program_config

    def sample_predictor_configs(
        self, program_config
    ) -> (paddle_infer.Config, List[int], float):
        def generate_dynamic_shape(attrs):
            self.dynamic_shape.min_input_shape = {
                "input_data": [1, 8, 8, 8],
                "index_data": [2, 2],
            }
            self.dynamic_shape.max_input_shape = {
                "input_data": [4, 32, 64, 64],
                "index_data": [2, 2],
            }
            self.dynamic_shape.opt_input_shape = {
                "input_data": [2, 32, 64, 64],
                "index_data": [2, 2],
            }

        def clear_dynamic_shape():
...
...
@@ -281,25 +273,26 @@ class TrtConvertGatherNdTest_dim_4_2(TrtLayerAutoScanTest):
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), (0, 4), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), (0, 4), 1e-3

        # for dynamic_shape
        generate_dynamic_shape(attrs)
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), (1, 3), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), (1, 3), 1e-3

    def add_skip_trt_case(self):
        def teller1(program_config, predictor_config):
            if len(self.dynamic_shape.min_input_shape) != 0 and os.name == 'nt':
                return True
            return False

        self.add_skip_case(
            teller1,
            SkipReasons.TRT_NOT_SUPPORT,
            "Under Windows Ci, this case will sporadically fail.",
        )

    def test(self):
        self.add_skip_trt_case()
...
...
@@ -307,29 +300,24 @@ class TrtConvertGatherNdTest_dim_4_2(TrtLayerAutoScanTest):

class TrtConvertGatherNdTest_dim_4_3(TrtLayerAutoScanTest):
    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        return True

    def sample_program_configs(self):
        def generate_input1():
            return np.random.random([2, 32, 64, 64]).astype(np.float32)

        def generate_input2():
            return np.ones([2, 2, 4]).astype(np.int32)

        ops_config = [
            {
                "op_type": "gather_nd",
                "op_inputs": {"X": ["input_data"], "Index": ["index_data"]},
                "op_outputs": {"Out": ["output_data"]},
                "op_attrs": {},
            }
        ]
        ops = self.generate_op_config(ops_config)

        program_config = ProgramConfig(
...
...
@@ -339,25 +327,26 @@ class TrtConvertGatherNdTest_dim_4_3(TrtLayerAutoScanTest):
                "input_data": TensorConfig(data_gen=partial(generate_input1)),
                "index_data": TensorConfig(data_gen=partial(generate_input2)),
            },
            outputs=["output_data"],
        )

        yield program_config

    def sample_predictor_configs(
        self, program_config
    ) -> (paddle_infer.Config, List[int], float):
        def generate_dynamic_shape(attrs):
            self.dynamic_shape.min_input_shape = {
                "input_data": [1, 8, 8, 8],
                "index_data": [2, 2, 4],
            }
            self.dynamic_shape.max_input_shape = {
                "input_data": [4, 32, 64, 64],
                "index_data": [2, 2, 4],
            }
            self.dynamic_shape.opt_input_shape = {
                "input_data": [2, 32, 64, 64],
                "index_data": [2, 2, 4],
            }

        def clear_dynamic_shape():
...
...
@@ -374,25 +363,26 @@ class TrtConvertGatherNdTest_dim_4_3(TrtLayerAutoScanTest):
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), (0, 4), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), (0, 4), 1e-3

        # for dynamic_shape
        generate_dynamic_shape(attrs)
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), (1, 3), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), (1, 3), 1e-3

    def add_skip_trt_case(self):
        def teller1(program_config, predictor_config):
            if len(self.dynamic_shape.min_input_shape) != 0 and os.name == 'nt':
                return True
            return False

        self.add_skip_case(
            teller1,
            SkipReasons.TRT_NOT_SUPPORT,
            "Under Windows Ci, this case will sporadically fail.",
        )

    def test(self):
        self.add_skip_trt_case()
...
...
@@ -400,29 +390,24 @@ class TrtConvertGatherNdTest_dim_4_3(TrtLayerAutoScanTest):

class TrtConvertGatherNdTest_dim_2_2(TrtLayerAutoScanTest):
    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        return True

    def sample_program_configs(self):
        def generate_input1():
            return np.random.random([2, 32]).astype(np.float32)

        def generate_input2():
            return np.array([[0, 3], [1, 9]]).astype(np.int32)

        ops_config = [
            {
                "op_type": "gather_nd",
                "op_inputs": {"X": ["input_data"], "Index": ["index_data"]},
                "op_outputs": {"Out": ["output_data"]},
                "op_attrs": {},
            }
        ]
        ops = self.generate_op_config(ops_config)

        program_config = ProgramConfig(
...
...
@@ -432,25 +417,26 @@ class TrtConvertGatherNdTest_dim_2_2(TrtLayerAutoScanTest):
                "input_data": TensorConfig(data_gen=partial(generate_input1)),
                "index_data": TensorConfig(data_gen=partial(generate_input2)),
            },
            outputs=["output_data"],
        )

        yield program_config

    def sample_predictor_configs(
        self, program_config
    ) -> (paddle_infer.Config, List[int], float):
        def generate_dynamic_shape(attrs):
            self.dynamic_shape.min_input_shape = {
                "input_data": [1, 4],
                "index_data": [2, 2],
            }
            self.dynamic_shape.max_input_shape = {
                "input_data": [4, 64],
                "index_data": [2, 2],
            }
            self.dynamic_shape.opt_input_shape = {
                "input_data": [2, 8],
                "index_data": [2, 2],
            }

        def clear_dynamic_shape():
...
...
@@ -467,25 +453,26 @@ class TrtConvertGatherNdTest_dim_2_2(TrtLayerAutoScanTest):
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), (0, 4), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), (0, 4), 1e-3

        # for dynamic_shape
        generate_dynamic_shape(attrs)
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), (1, 3), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), (1, 3), 1e-3

    def add_skip_trt_case(self):
        def teller1(program_config, predictor_config):
            if len(self.dynamic_shape.min_input_shape) != 0 and os.name == 'nt':
                return True
            return False

        self.add_skip_case(
            teller1,
            SkipReasons.TRT_NOT_SUPPORT,
            "Under Windows Ci, this case will sporadically fail.",
        )

    def test(self):
        self.add_skip_trt_case()
...
...
@@ -493,30 +480,26 @@ class TrtConvertGatherNdTest_dim_2_2(TrtLayerAutoScanTest):

class TrtConvertGatherNdTest_dim_3_3(TrtLayerAutoScanTest):
    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        return True

    def sample_program_configs(self):
        def generate_input1():
            return np.random.random([16, 32, 256]).astype(np.float32)

        def generate_input2():
            return np.array([[[2, 5], [3, 8]], [[0, 2], [0, 3]]]).astype(np.int32)

        ops_config = [
            {
                "op_type": "gather_nd",
                "op_inputs": {"X": ["input_data"], "Index": ["index_data"]},
                "op_outputs": {"Out": ["output_data"]},
                "op_attrs": {},
            }
        ]
        ops = self.generate_op_config(ops_config)

        program_config = ProgramConfig(
...
...
@@ -526,25 +509,26 @@ class TrtConvertGatherNdTest_dim_3_3(TrtLayerAutoScanTest):
                "input_data": TensorConfig(data_gen=partial(generate_input1)),
                "index_data": TensorConfig(data_gen=partial(generate_input2)),
            },
            outputs=["output_data"],
        )

        yield program_config

    def sample_predictor_configs(
        self, program_config
    ) -> (paddle_infer.Config, List[int], float):
        def generate_dynamic_shape(attrs):
            self.dynamic_shape.min_input_shape = {
                "input_data": [1, 4, 4],
                "index_data": [1, 1, 1],
            }
            self.dynamic_shape.max_input_shape = {
                "input_data": [16, 64, 512],
                "index_data": [4, 2, 4],
            }
            self.dynamic_shape.opt_input_shape = {
                "input_data": [2, 8, 64],
                "index_data": [2, 2, 2],
            }

        def clear_dynamic_shape():
...
...
@@ -561,14 +545,14 @@ class TrtConvertGatherNdTest_dim_3_3(TrtLayerAutoScanTest):
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), (0, 4), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), (0, 4), 1e-3

        # for dynamic_shape
        generate_dynamic_shape(attrs)
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), (1, 3), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), (1, 3), 1e-3

    def test(self):
        self.run_test()
...
...
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gelu.py View file @ dbe08e9b
...
...
@@ -22,12 +22,10 @@ import unittest


class TrtConvertGeluTest(TrtLayerAutoScanTest):
    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        return True

    def sample_program_configs(self):
        def generate_input1(dims, attrs: List[Dict[str, Any]]):
            if dims == 1:
                return np.ones([32]).astype(np.float32)
...
...
@@ -43,33 +41,32 @@ class TrtConvertGeluTest(TrtLayerAutoScanTest):
                self.dims = dims
                dics = [{"approximate": approximate}]
                ops_config = [
                    {
                        "op_type": "gelu",
                        "op_inputs": {"X": ["input_data"]},
                        "op_outputs": {"Out": ["output_data"]},
                        "op_attrs": dics[0],
                    }
                ]
                ops = self.generate_op_config(ops_config)

                program_config = ProgramConfig(
                    ops=ops,
                    weights={},
                    inputs={
                        "input_data": TensorConfig(
                            data_gen=partial(generate_input1, dims, dics)
                        )
                    },
                    outputs=["output_data"],
                )

                yield program_config

    def sample_predictor_configs(
        self, program_config
    ) -> (paddle_infer.Config, List[int], float):
        def generate_dynamic_shape(attrs):
            if self.dims == 1:
                self.dynamic_shape.min_input_shape = {"input_data": [1]}
...
...
@@ -123,19 +120,23 @@ class TrtConvertGeluTest(TrtLayerAutoScanTest):
        clear_dynamic_shape()
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, False
        ), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, False
        ), 1e-3

        # for dynamic_shape
        generate_dynamic_shape(attrs)
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, True
        ), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, True
        ), 1e-3

    def test(self):
        self.run_test()
...
...
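Each converter test above is an ordinary `unittest` case built on `TrtLayerAutoScanTest`, so a single file can be exercised on its own. The sketch below assumes a TensorRT-enabled Paddle build and reuses the file path from this commit; it is an illustrative invocation, not part of the repository.

import subprocess
import sys

# Run one converter test file directly (assumes TensorRT-enabled Paddle).
subprocess.run(
    [
        sys.executable,
        "python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gelu.py",
    ],
    check=True,
)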
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_grid_sampler.py  View file @ dbe08e9b
...
@@ -22,29 +22,27 @@ import unittest
class TrtConvertGridSampler(TrtLayerAutoScanTest):
    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        return True

    def sample_program_configs(self):
        def generate_input1():
            return np.random.random([1, 3, 32, 32]).astype(np.float32)

        def generate_input2():
            return np.random.random([1, 3, 3, 2]).astype(np.float32)

-       ops_config = [{
-           "op_type": "grid_sampler",
-           "op_inputs": {
-               "X": ["input_data"],
-               "Grid": ["grid_data"],
-           },
-           "op_outputs": {"Output": ["output_data"]},
-           "op_attrs": {}
-       }]
+       ops_config = [
+           {
+               "op_type": "grid_sampler",
+               "op_inputs": {
+                   "X": ["input_data"],
+                   "Grid": ["grid_data"],
+               },
+               "op_outputs": {"Output": ["output_data"]},
+               "op_attrs": {},
+           }
+       ]
        ops = self.generate_op_config(ops_config)

        for i in range(10):
...
@@ -52,30 +50,33 @@ class TrtConvertGridSampler(TrtLayerAutoScanTest):
                ops=ops,
                weights={},
                inputs={
-                   "input_data": TensorConfig(data_gen=partial(generate_input1)),
-                   "grid_data": TensorConfig(data_gen=partial(generate_input2)),
+                   "input_data": TensorConfig(
+                       data_gen=partial(generate_input1)
+                   ),
+                   "grid_data": TensorConfig(
+                       data_gen=partial(generate_input2)
+                   ),
                },
-               outputs=["output_data"])
+               outputs=["output_data"],
+           )

            yield program_config

    def sample_predictor_configs(
-           self, program_config) -> (paddle_infer.Config, List[int], float):
+       self, program_config
+   ) -> (paddle_infer.Config, List[int], float):
        def generate_dynamic_shape(attrs):
            self.dynamic_shape.min_input_shape = {
                "input_data": [1, 3, 32, 32],
-               "grid_data": [1, 3, 3, 2]
+               "grid_data": [1, 3, 3, 2],
            }
            self.dynamic_shape.max_input_shape = {
                "input_data": [1, 3, 64, 64],
-               "grid_data": [1, 3, 4, 4]
+               "grid_data": [1, 3, 4, 4],
            }
            self.dynamic_shape.opt_input_shape = {
                "input_data": [1, 3, 32, 32],
-               "grid_data": [1, 3, 3, 2]
+               "grid_data": [1, 3, 3, 2],
            }

        def clear_dynamic_shape():
...
@@ -92,14 +93,14 @@ class TrtConvertGridSampler(TrtLayerAutoScanTest):
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), (0, 4), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
-       yield self.create_inference_config(), (0, 4), 1e-5
+       yield self.create_inference_config(), (0, 4), 1e-3
        # for dynamic_shape
        generate_dynamic_shape(attrs)
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), (1, 3), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
-       yield self.create_inference_config(), (1, 3), 1e-5
+       yield self.create_inference_config(), (1, 3), 1e-3

    def test(self):
        self.run_test()
...
...
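The last element of each yielded tuple is the comparison tolerance: 1e-5 for FP32 and, after this commit, 1e-3 for FP16. A hedged sketch of how such a tolerance is conventionally applied when comparing a TensorRT result against the framework baseline; the real checker lives inside TrtLayerAutoScanTest, so the helper name below is hypothetical:

import numpy as np

def outputs_match(baseline: np.ndarray, trt_out: np.ndarray, tol: float) -> bool:
    # Illustrative only: use `tol` as both the absolute and relative tolerance.
    return np.allclose(baseline, trt_out, atol=tol, rtol=tol)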
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_group_norm.py  View file @ dbe08e9b
...
@@ -22,7 +22,6 @@ import unittest
class TrtConvertGroupNormTest(TrtLayerAutoScanTest):
    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        inputs = program_config.inputs
        weights = program_config.weights
...
@@ -36,7 +35,6 @@ class TrtConvertGroupNormTest(TrtLayerAutoScanTest):
        return True

    def sample_program_configs(self):
        def generate_input(attrs: List[Dict[str, Any]], batch):
            if attrs[0]['data_layout'] == 'NCHW':
                return np.random.random([batch, 32, 64, 64]).astype(np.float32)
...
@@ -53,47 +51,56 @@ class TrtConvertGroupNormTest(TrtLayerAutoScanTest):
            for group in [1, 4, 32, -1]:
                for epsilon in [0.0001, 0.0007, -1, 1]:
                    for data_layout in ['NCHW']:
-                       dics = [{
-                           "epsilon": epsilon,
-                           "groups": group,
-                           "data_layout": data_layout
-                       }]
-                       ops_config = [{
-                           "op_type": "group_norm",
-                           "op_inputs": {
-                               "X": ["input_data"],
-                               "Scale": ["scale_weight"],
-                               "Bias": ["bias_weight"]
-                           },
-                           "op_outputs": {
-                               "Y": ["y_output"],
-                               "Mean": ["mean_output"],
-                               "Variance": ["variance_output"]
-                           },
-                           "op_attrs": dics[0]
-                       }]
+                       dics = [
+                           {
+                               "epsilon": epsilon,
+                               "groups": group,
+                               "data_layout": data_layout,
+                           }
+                       ]
+                       ops_config = [
+                           {
+                               "op_type": "group_norm",
+                               "op_inputs": {
+                                   "X": ["input_data"],
+                                   "Scale": ["scale_weight"],
+                                   "Bias": ["bias_weight"],
+                               },
+                               "op_outputs": {
+                                   "Y": ["y_output"],
+                                   "Mean": ["mean_output"],
+                                   "Variance": ["variance_output"],
+                               },
+                               "op_attrs": dics[0],
+                           }
+                       ]
                        ops = self.generate_op_config(ops_config)
                        program_config = ProgramConfig(
                            ops=ops,
                            weights={
-                               "scale_weight": TensorConfig(data_gen=partial(generate_scale)),
-                               "bias_weight": TensorConfig(data_gen=partial(generate_bias))
+                               "scale_weight": TensorConfig(
+                                   data_gen=partial(generate_scale)
+                               ),
+                               "bias_weight": TensorConfig(
+                                   data_gen=partial(generate_bias)
+                               ),
                            },
                            inputs={
-                               "input_data": TensorConfig(data_gen=partial(generate_input, dics, batch))
+                               "input_data": TensorConfig(
+                                   data_gen=partial(generate_input, dics, batch)
+                               )
                            },
-                           outputs=["y_output"])
+                           outputs=["y_output"],
+                       )

                        yield program_config

    def sample_predictor_configs(
-           self, program_config) -> (paddle_infer.Config, List[int], float):
+       self, program_config
+   ) -> (paddle_infer.Config, List[int], float):
        def generate_dynamic_shape(attrs):
            self.dynamic_shape.min_input_shape = {"input_data": [1, 16, 16, 16]}
            self.dynamic_shape.max_input_shape = {
...
@@ -117,19 +124,23 @@ class TrtConvertGroupNormTest(TrtLayerAutoScanTest):
        clear_dynamic_shape()
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), generate_trt_nodes_num(
-           attrs, False), 1e-5
+           attrs, False
+       ), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), generate_trt_nodes_num(
-           attrs, False), 1e-5
+           attrs, False
+       ), (1e-3, 1e-3)
        # for dynamic_shape
        generate_dynamic_shape(attrs)
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), generate_trt_nodes_num(
-           attrs, True), 1e-5
+           attrs, True
+       ), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), generate_trt_nodes_num(
-           attrs, True), 1e-5
+           attrs, True
+       ), (1e-3, 1e-3)

    def add_skip_trt_case(self):
        pass
...
...
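As a reference for what the group_norm op computes in NCHW layout (groups must divide the channel count and epsilon must be positive for the formula to be meaningful), a hedged NumPy sketch; Paddle's kernel remains the authoritative implementation:

import numpy as np

def group_norm_reference(x, scale, bias, groups, epsilon):
    # x: (N, C, H, W); normalize each group of C // groups channels.
    n, c, h, w = x.shape
    g = x.reshape(n, groups, c // groups, h, w)
    mean = g.mean(axis=(2, 3, 4), keepdims=True)
    var = g.var(axis=(2, 3, 4), keepdims=True)
    y = ((g - mean) / np.sqrt(var + epsilon)).reshape(n, c, h, w)
    return y * scale.reshape(1, c, 1, 1) + bias.reshape(1, c, 1, 1)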
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_hard_sigmoid.py  View file @ dbe08e9b
...
@@ -22,12 +22,10 @@ import unittest
class TrtConvertHardSigmoidTest_dim_2(TrtLayerAutoScanTest):
    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        return True

    def sample_program_configs(self):
        def generate_input(shape):
            return np.random.random(shape).astype(np.float32)
...
@@ -37,33 +35,34 @@ class TrtConvertHardSigmoidTest_dim_2(TrtLayerAutoScanTest):
            for slope in [0.1, 0.5]:
                for offset in [0.2, 0.7]:
                    dics = [{"slope": slope, "offset": offset}]
-                   ops_config = [{
-                       "op_type": "hard_sigmoid",
-                       "op_inputs": {
-                           "X": ["input_data"],
-                       },
-                       "op_outputs": {"Out": ["output_data"]},
-                       "op_attrs": dics[0]
-                   }]
+                   ops_config = [
+                       {
+                           "op_type": "hard_sigmoid",
+                           "op_inputs": {
+                               "X": ["input_data"],
+                           },
+                           "op_outputs": {"Out": ["output_data"]},
+                           "op_attrs": dics[0],
+                       }
+                   ]
                    ops = self.generate_op_config(ops_config)
                    program_config = ProgramConfig(
                        ops=ops,
                        weights={},
                        inputs={
-                           "input_data": TensorConfig(data_gen=partial(generate_input, shape))
+                           "input_data": TensorConfig(
+                               data_gen=partial(generate_input, shape)
+                           )
                        },
-                       outputs=["output_data"])
+                       outputs=["output_data"],
+                   )

                    yield program_config

    def sample_predictor_configs(
-           self, program_config) -> (paddle_infer.Config, List[int], float):
+       self, program_config
+   ) -> (paddle_infer.Config, List[int], float):
        def generate_dynamic_shape(attrs):
            if self.input_dim == 2:
                self.dynamic_shape.min_input_shape = {"input_data": [1, 8]}
...
@@ -98,14 +97,14 @@ class TrtConvertHardSigmoidTest_dim_2(TrtLayerAutoScanTest):
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), (1, 2), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
-       yield self.create_inference_config(), (1, 2), 1e-5
+       yield self.create_inference_config(), (1, 2), 1e-3
        # for dynamic_shape
        generate_dynamic_shape(attrs)
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), (1, 2), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
-       yield self.create_inference_config(), (1, 2), 1e-5
+       yield self.create_inference_config(), (1, 2), 1e-3

    def test(self):
        self.run_test()
...
...
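For intuition, hard_sigmoid with the slope/offset values swept above is a clipped linear function; a hedged NumPy reference of the usual definition (Paddle's kernel is authoritative):

import numpy as np

def hard_sigmoid_reference(x, slope=0.1, offset=0.2):
    # out = clip(slope * x + offset, 0, 1)
    return np.clip(slope * x + offset, 0.0, 1.0)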
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_hard_swish.py  View file @ dbe08e9b
...
@@ -22,7 +22,6 @@ import unittest
class TrtConvertHardSwishTest(TrtLayerAutoScanTest):
    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        inputs = program_config.inputs
        weights = program_config.weights
...
@@ -36,46 +35,46 @@ class TrtConvertHardSwishTest(TrtLayerAutoScanTest):
        return True

    def sample_program_configs(self):
        def generate_input1(attrs: List[Dict[str, Any]]):
            return np.ones([1, 3, 32, 32]).astype(np.float32)

        for threshold in [6.0, 7.0, 100.0, 0.0, -1.0]:
            for scale in [5.0, 7.0, -1.0, 0.0, 100.0]:
                for offset in [3.0, 5.0, -1.0, 0.0, 100.0]:
-                   dics = [{
-                       "threshold": threshold,
-                       "scale": scale,
-                       "offset": offset
-                   }]
-                   ops_config = [{
-                       "op_type": "hard_swish",
-                       "op_inputs": {"X": ["input_data"]},
-                       "op_outputs": {"Out": ["hard_swish_output_data"]},
-                       "op_attrs": dics[0]
-                   }]
+                   dics = [
+                       {
+                           "threshold": threshold,
+                           "scale": scale,
+                           "offset": offset,
+                       }
+                   ]
+                   ops_config = [
+                       {
+                           "op_type": "hard_swish",
+                           "op_inputs": {"X": ["input_data"]},
+                           "op_outputs": {"Out": ["hard_swish_output_data"]},
+                           "op_attrs": dics[0],
+                       }
+                   ]
                    ops = self.generate_op_config(ops_config)
                    program_config = ProgramConfig(
                        ops=ops,
                        weights={},
                        inputs={
-                           "input_data": TensorConfig(data_gen=partial(generate_input1, dics))
+                           "input_data": TensorConfig(
+                               data_gen=partial(generate_input1, dics)
+                           )
                        },
-                       outputs=["hard_swish_output_data"])
+                       outputs=["hard_swish_output_data"],
+                   )

                    yield program_config

    def sample_predictor_configs(
-           self, program_config) -> (paddle_infer.Config, List[int], float):
+       self, program_config
+   ) -> (paddle_infer.Config, List[int], float):
        def generate_dynamic_shape(attrs):
            self.dynamic_shape.min_input_shape = {"input_data": [1, 3, 16, 16]}
            self.dynamic_shape.max_input_shape = {"input_data": [2, 3, 32, 32]}
...
@@ -97,19 +96,23 @@ class TrtConvertHardSwishTest(TrtLayerAutoScanTest):
        clear_dynamic_shape()
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), generate_trt_nodes_num(
-           attrs, False), 1e-5
+           attrs, False
+       ), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), generate_trt_nodes_num(
-           attrs, False), (1e-5, 1e-5)
+           attrs, False
+       ), (1e-3, 1e-3)
        # for dynamic_shape
        generate_dynamic_shape(attrs)
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), generate_trt_nodes_num(
-           attrs, True), 1e-5
+           attrs, True
+       ), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), generate_trt_nodes_num(
-           attrs, True), (1e-5, 1e-5)
+           attrs, True
+       ), (1e-3, 1e-3)

    def test(self):
        self.run_test()
...
...
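hard_swish combines the threshold, scale, and offset attributes swept above (several of the swept values, such as scale = 0.0, are deliberately degenerate). A hedged NumPy reference of the usual definition, assuming a non-zero scale; Paddle's kernel is the authoritative definition:

import numpy as np

def hard_swish_reference(x, threshold=6.0, scale=6.0, offset=3.0):
    # out = x * min(max(x + offset, 0), threshold) / scale
    return x * np.clip(x + offset, 0.0, threshold) / scale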
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_inverse.py  View file @ dbe08e9b
...
@@ -22,41 +22,41 @@ import unittest
class TrtConvertInverse(TrtLayerAutoScanTest):
    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        return True

    def sample_program_configs(self):
        def generate_input1():
            return np.random.random([32, 32]).astype(np.float32)

-       ops_config = [{
-           "op_type": "inverse",
-           "op_inputs": {
-               "Input": ["input_data"],
-           },
-           "op_outputs": {"Output": ["output_data"]},
-           "op_attrs": {}
-       }]
+       ops_config = [
+           {
+               "op_type": "inverse",
+               "op_inputs": {
+                   "Input": ["input_data"],
+               },
+               "op_outputs": {"Output": ["output_data"]},
+               "op_attrs": {},
+           }
+       ]
        ops = self.generate_op_config(ops_config)

        for i in range(10):
            program_config = ProgramConfig(
                ops=ops,
                weights={},
                inputs={
-                   "input_data": TensorConfig(data_gen=partial(generate_input1)),
+                   "input_data": TensorConfig(
+                       data_gen=partial(generate_input1)
+                   ),
                },
-               outputs=["output_data"])
+               outputs=["output_data"],
+           )

            yield program_config

    def sample_predictor_configs(
-           self, program_config) -> (paddle_infer.Config, List[int], float):
+       self, program_config
+   ) -> (paddle_infer.Config, List[int], float):
        def generate_dynamic_shape(attrs):
            self.dynamic_shape.min_input_shape = {
                "input_data": [1, 1],
...
@@ -82,14 +82,14 @@ class TrtConvertInverse(TrtLayerAutoScanTest):
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), (0, 3), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
-       yield self.create_inference_config(), (0, 3), 1e-5
+       yield self.create_inference_config(), (0, 3), 1e-3
        # for dynamic_shape
        generate_dynamic_shape(attrs)
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), (1, 2), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
-       yield self.create_inference_config(), (1, 2), 1e-5
+       yield self.create_inference_config(), (1, 2), 1e-3

    def test(self):
        self.run_test()
...
...
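The inverse op maps each [32, 32] input to its matrix inverse; matrices drawn from np.random.random are invertible with overwhelming probability, which is presumably why the test can simply loop ten times over fresh inputs. A hedged NumPy reference check:

import numpy as np

x = np.random.random([32, 32]).astype(np.float32)
x_inv = np.linalg.inv(x)                          # reference inverse
residual = np.abs(x @ x_inv - np.eye(32)).max()   # should be near zero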
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_leaky_relu.py  View file @ dbe08e9b
...
@@ -23,12 +23,10 @@ import unittest
class TrtConvertLeakyReluTest(TrtLayerAutoScanTest):
    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        return True

    def sample_program_configs(self):
        def generate_input1(shape):
            return np.random.random(shape).astype(np.float32)
...
@@ -37,32 +35,35 @@ class TrtConvertLeakyReluTest(TrtLayerAutoScanTest):
            self.input_dim = len(shape)
            for alpha in [0.02, 1.0, 100.0, -1.0, 0.0]:
                dics = [{"alpha": alpha}]
-               ops_config = [{
-                   "op_type": "leaky_relu",
-                   "op_inputs": {
-                       "X": ["input_data"],
-                   },
-                   "op_outputs": {
-                       "Out": ["y_data"],
-                   },
-                   "op_attrs": dics[0]
-               }]
+               ops_config = [
+                   {
+                       "op_type": "leaky_relu",
+                       "op_inputs": {
+                           "X": ["input_data"],
+                       },
+                       "op_outputs": {
+                           "Out": ["y_data"],
+                       },
+                       "op_attrs": dics[0],
+                   }
+               ]
                ops = self.generate_op_config(ops_config)
                program_config = ProgramConfig(
                    ops=ops,
                    weights={},
                    inputs={
-                       "input_data": TensorConfig(data_gen=partial(generate_input1, shape))
+                       "input_data": TensorConfig(
+                           data_gen=partial(generate_input1, shape)
+                       )
                    },
-                   outputs=["y_data"])
+                   outputs=["y_data"],
+               )

                yield program_config

    def sample_predictor_configs(
-           self, program_config) -> (paddle_infer.Config, List[int], float):
+       self, program_config
+   ) -> (paddle_infer.Config, List[int], float):
        def generate_dynamic_shape(attrs):
            if self.input_dim == 2:
                self.dynamic_shape.min_input_shape = {"input_data": [1, 8]}
...
@@ -101,25 +102,31 @@ class TrtConvertLeakyReluTest(TrtLayerAutoScanTest):
        clear_dynamic_shape()
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), generate_trt_nodes_num(
-           attrs, False), 1e-5
+           attrs, False
+       ), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), generate_trt_nodes_num(
-           attrs, False), (1e-5, 1e-5)
+           attrs, False
+       ), (1e-3, 1e-3)
        self.trt_param.precision = paddle_infer.PrecisionType.Int8
        yield self.create_inference_config(), generate_trt_nodes_num(
-           attrs, False), (1e-5, 1e-5)
+           attrs, False
+       ), (1e-3, 1e-3)
        # for dynamic_shape
        generate_dynamic_shape(attrs)
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), generate_trt_nodes_num(
-           attrs, True), 1e-5
+           attrs, True
+       ), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), generate_trt_nodes_num(
-           attrs, True), (1e-5, 1e-5)
+           attrs, True
+       ), (1e-3, 1e-3)
        self.trt_param.precision = paddle_infer.PrecisionType.Int8
        yield self.create_inference_config(), generate_trt_nodes_num(
-           attrs, True), (1e-5, 1e-5)
+           attrs, True
+       ), (1e-3, 1e-3)

    def test(self):
        self.run_test()
...
...
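leaky_relu is elementwise in the alpha attribute swept above (alpha <= 0 is unusual but still well defined); a hedged NumPy reference:

import numpy as np

def leaky_relu_reference(x, alpha=0.02):
    # out = x where x >= 0, otherwise alpha * x
    return np.where(x >= 0, x, alpha * x)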