Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
Paddle
Commits
dbe08e9b
Commit
dbe08e9b
authored
Jun 12, 2023
by
yuguo960516yuguo
Browse files
2.4.2
parent
b5499578
Changes
302
Hide whitespace changes
Inline
Side-by-side
Showing
20 changed files
with
1284 additions
and
1109 deletions
+1284
-1109
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_arg_max.py
.../tests/unittests/ir/inference/test_trt_convert_arg_max.py
+29
-26
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_bmm.py
...luid/tests/unittests/ir/inference/test_trt_convert_bmm.py
+32
-31
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_clip.py
...uid/tests/unittests/ir/inference/test_trt_convert_clip.py
+39
-37
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_concat.py
...d/tests/unittests/ir/inference/test_trt_convert_concat.py
+102
-75
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d_transpose.py
...ittests/ir/inference/test_trt_convert_conv2d_transpose.py
+122
-96
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_dropout.py
.../tests/unittests/ir/inference/test_trt_convert_dropout.py
+44
-32
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_elementwise.py
...ts/unittests/ir/inference/test_trt_convert_elementwise.py
+246
-196
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_equal.py
...id/tests/unittests/ir/inference/test_trt_convert_equal.py
+42
-42
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_fc.py
...fluid/tests/unittests/ir/inference/test_trt_convert_fc.py
+75
-70
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_fill_constant.py
.../unittests/ir/inference/test_trt_convert_fill_constant.py
+48
-36
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_flatten.py
.../tests/unittests/ir/inference/test_trt_convert_flatten.py
+106
-92
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gather.py
...d/tests/unittests/ir/inference/test_trt_convert_gather.py
+72
-55
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gather_nd.py
...ests/unittests/ir/inference/test_trt_convert_gather_nd.py
+132
-148
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gelu.py
...uid/tests/unittests/ir/inference/test_trt_convert_gelu.py
+23
-22
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_grid_sampler.py
...s/unittests/ir/inference/test_trt_convert_grid_sampler.py
+26
-25
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_group_norm.py
...sts/unittests/ir/inference/test_trt_convert_group_norm.py
+46
-35
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_hard_sigmoid.py
...s/unittests/ir/inference/test_trt_convert_hard_sigmoid.py
+19
-20
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_hard_swish.py
...sts/unittests/ir/inference/test_trt_convert_hard_swish.py
+31
-28
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_inverse.py
.../tests/unittests/ir/inference/test_trt_convert_inverse.py
+19
-19
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_leaky_relu.py
...sts/unittests/ir/inference/test_trt_convert_leaky_relu.py
+31
-24
No files found.
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_arg_max.py
View file @
dbe08e9b
...
@@ -22,7 +22,6 @@ from typing import List
...
@@ -22,7 +22,6 @@ from typing import List
class
TrtConvertArgMaxTest
(
TrtLayerAutoScanTest
):
class
TrtConvertArgMaxTest
(
TrtLayerAutoScanTest
):
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
input_shape
=
program_config
.
inputs
[
"arg_max_input"
].
shape
input_shape
=
program_config
.
inputs
[
"arg_max_input"
].
shape
axis
=
program_config
.
ops
[
0
].
attrs
[
"axis"
]
axis
=
program_config
.
ops
[
0
].
attrs
[
"axis"
]
...
@@ -33,7 +32,6 @@ class TrtConvertArgMaxTest(TrtLayerAutoScanTest):
...
@@ -33,7 +32,6 @@ class TrtConvertArgMaxTest(TrtLayerAutoScanTest):
return
True
return
True
def
sample_program_configs
(
self
):
def
sample_program_configs
(
self
):
def
generate_input
(
rank
,
batch
):
def
generate_input
(
rank
,
batch
):
dims
=
[
batch
]
dims
=
[
batch
]
for
i
in
range
(
rank
-
1
):
for
i
in
range
(
rank
-
1
):
...
@@ -48,36 +46,37 @@ class TrtConvertArgMaxTest(TrtLayerAutoScanTest):
...
@@ -48,36 +46,37 @@ class TrtConvertArgMaxTest(TrtLayerAutoScanTest):
self
.
rank
=
rank
self
.
rank
=
rank
flatten
=
False
flatten
=
False
dtype
=
2
dtype
=
2
ops_config
=
[{
ops_config
=
[
"op_type"
:
"arg_max"
,
{
"op_inputs"
:
{
"op_type"
:
"arg_max"
,
"X"
:
[
"arg_max_input"
]
"op_inputs"
:
{
"X"
:
[
"arg_max_input"
]},
},
"op_outputs"
:
{
"Out"
:
[
"arg_max_out"
]},
"op_outputs"
:
{
"op_attrs"
:
{
"Out"
:
[
"arg_max_out"
]
"axis"
:
axis
,
},
"keepdims"
:
keepdims
,
"op_attrs"
:
{
"flatten"
:
flatten
,
"axis"
:
axis
,
"dtype"
:
dtype
,
"keepdims"
:
keepdims
,
},
"flatten"
:
flatten
,
"dtype"
:
dtype
}
}
}
]
]
ops
=
self
.
generate_op_config
(
ops_config
)
ops
=
self
.
generate_op_config
(
ops_config
)
program_config
=
ProgramConfig
(
program_config
=
ProgramConfig
(
ops
=
ops
,
ops
=
ops
,
weights
=
{},
weights
=
{},
inputs
=
{
inputs
=
{
"arg_max_input"
:
"arg_max_input"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
data_gen
=
partial
(
generate_input
,
rank
,
batch
))
generate_input
,
rank
,
batch
)
)
},
},
outputs
=
[
"arg_max_out"
])
outputs
=
[
"arg_max_out"
],
)
yield
program_config
yield
program_config
def
sample_predictor_configs
(
def
sample_predictor_configs
(
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
def
generate_dynamic_shape
(
attrs
):
def
generate_dynamic_shape
(
attrs
):
if
self
.
rank
==
3
:
if
self
.
rank
==
3
:
self
.
dynamic_shape
.
min_input_shape
=
{
self
.
dynamic_shape
.
min_input_shape
=
{
...
@@ -117,19 +116,23 @@ class TrtConvertArgMaxTest(TrtLayerAutoScanTest):
...
@@ -117,19 +116,23 @@ class TrtConvertArgMaxTest(TrtLayerAutoScanTest):
clear_dynamic_shape
()
clear_dynamic_shape
()
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
1e-5
attrs
,
False
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
1e-5
attrs
,
False
),
1e-3
# for dynamic_shape
# for dynamic_shape
generate_dynamic_shape
(
attrs
)
generate_dynamic_shape
(
attrs
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
1e-5
attrs
,
True
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
1e-5
attrs
,
True
),
1e-3
def
test
(
self
):
def
test
(
self
):
self
.
run_test
()
self
.
run_test
()
...
...
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_bmm.py
View file @
dbe08e9b
...
@@ -12,20 +12,18 @@
...
@@ -12,20 +12,18 @@
# See the License for the specific language governing permissions and
# See the License for the specific language governing permissions and
# limitations under the License.
# limitations under the License.
from
trt_layer_auto_scan_test
import
TrtLayerAutoScanTest
,
SkipReasons
from
trt_layer_auto_scan_test
import
TrtLayerAutoScanTest
from
program_config
import
TensorConfig
,
ProgramConfig
from
program_config
import
TensorConfig
,
ProgramConfig
import
numpy
as
np
import
numpy
as
np
import
paddle.inference
as
paddle_infer
import
paddle.inference
as
paddle_infer
from
functools
import
partial
from
functools
import
partial
from
typing
import
Optional
,
List
,
Callable
,
Dict
,
Any
,
Se
t
from
typing
import
Lis
t
import
unittest
import
unittest
import
os
import
os
class
TrtConvertBmmTest_dynamic
(
TrtLayerAutoScanTest
):
class
TrtConvertBmmTest_dynamic
(
TrtLayerAutoScanTest
):
def
sample_program_configs
(
self
):
def
sample_program_configs
(
self
):
def
generate_input
(
shape
):
def
generate_input
(
shape
):
return
np
.
random
.
random
(
shape
).
astype
(
np
.
float32
)
return
np
.
random
.
random
(
shape
).
astype
(
np
.
float32
)
...
@@ -33,48 +31,47 @@ class TrtConvertBmmTest_dynamic(TrtLayerAutoScanTest):
...
@@ -33,48 +31,47 @@ class TrtConvertBmmTest_dynamic(TrtLayerAutoScanTest):
input1_shape
=
[
batch
,
350
,
75
]
input1_shape
=
[
batch
,
350
,
75
]
input2_shape
=
[
batch
,
75
,
25
]
input2_shape
=
[
batch
,
75
,
25
]
dics
=
[{}]
dics
=
[{}]
ops_config
=
[{
ops_config
=
[
"op_type"
:
"bmm"
,
{
"op_inputs"
:
{
"op_type"
:
"bmm"
,
"X"
:
[
"input1_data"
],
"op_inputs"
:
{
"X"
:
[
"input1_data"
],
"Y"
:
[
"input2_data"
]},
"Y"
:
[
"input2_data"
]
"op_outputs"
:
{
"Out"
:
[
"output_data"
]},
},
"op_attrs"
:
dics
[
0
],
"op_outputs"
:
{
}
"Out"
:
[
"output_data"
]
]
},
"op_attrs"
:
dics
[
0
]
}]
ops
=
self
.
generate_op_config
(
ops_config
)
ops
=
self
.
generate_op_config
(
ops_config
)
program_config
=
ProgramConfig
(
program_config
=
ProgramConfig
(
ops
=
ops
,
ops
=
ops
,
weights
=
{},
weights
=
{},
inputs
=
{
inputs
=
{
"input1_data"
:
"input1_data"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
generate_input
,
input1_shape
)
data_gen
=
partial
(
generate_input
,
input1_shape
)),
),
"input2_data"
:
"input2_data"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
generate_input
,
input2_shape
))
data_gen
=
partial
(
generate_input
,
input2_shape
)
),
},
},
outputs
=
[
"output_data"
])
outputs
=
[
"output_data"
],
)
yield
program_config
yield
program_config
def
sample_predictor_configs
(
def
sample_predictor_configs
(
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
def
generate_dynamic_shape
(
attrs
):
def
generate_dynamic_shape
(
attrs
):
self
.
dynamic_shape
.
min_input_shape
=
{
self
.
dynamic_shape
.
min_input_shape
=
{
"input1_data"
:
[
10
,
350
,
75
],
"input1_data"
:
[
10
,
350
,
75
],
"input2_data"
:
[
10
,
75
,
25
]
"input2_data"
:
[
10
,
75
,
25
]
,
}
}
self
.
dynamic_shape
.
max_input_shape
=
{
self
.
dynamic_shape
.
max_input_shape
=
{
"input1_data"
:
[
100
,
350
,
75
],
"input1_data"
:
[
100
,
350
,
75
],
"input2_data"
:
[
100
,
75
,
25
]
"input2_data"
:
[
100
,
75
,
25
]
,
}
}
self
.
dynamic_shape
.
opt_input_shape
=
{
self
.
dynamic_shape
.
opt_input_shape
=
{
"input1_data"
:
[
15
,
350
,
75
],
"input1_data"
:
[
15
,
350
,
75
],
"input2_data"
:
[
15
,
75
,
25
]
"input2_data"
:
[
15
,
75
,
25
]
,
}
}
def
clear_dynamic_shape
():
def
clear_dynamic_shape
():
...
@@ -95,25 +92,29 @@ class TrtConvertBmmTest_dynamic(TrtLayerAutoScanTest):
...
@@ -95,25 +92,29 @@ class TrtConvertBmmTest_dynamic(TrtLayerAutoScanTest):
clear_dynamic_shape
()
clear_dynamic_shape
()
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
1e-5
attrs
,
False
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
1e-5
attrs
,
False
),
1e-3
# The output has little diff between gpu and trt in CI-Windows-Inference
# The output has little diff between gpu and trt in CI-Windows-Inference
tol_fp32
=
1e-4
tol_fp32
=
1e-4
tol_half
=
1e-4
tol_half
=
1e-4
if
(
os
.
name
==
'nt'
)
:
if
os
.
name
==
'nt'
:
tol_fp32
=
1e-2
tol_fp32
=
1e-2
tol_half
=
1e-2
tol_half
=
1e-2
# for dynamic_shape
# for dynamic_shape
generate_dynamic_shape
(
attrs
)
generate_dynamic_shape
(
attrs
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
tol_fp32
attrs
,
True
),
tol_fp32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
tol_half
attrs
,
True
),
tol_half
def
add_skip_trt_case
(
self
):
def
add_skip_trt_case
(
self
):
pass
pass
...
...
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_clip.py
View file @
dbe08e9b
...
@@ -22,12 +22,10 @@ import unittest
...
@@ -22,12 +22,10 @@ import unittest
class
TrtConvertClipTest
(
TrtLayerAutoScanTest
):
class
TrtConvertClipTest
(
TrtLayerAutoScanTest
):
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
return
True
return
True
def
sample_program_configs
(
self
):
def
sample_program_configs
(
self
):
def
generate_input1
(
dims
,
batch
,
attrs
:
List
[
Dict
[
str
,
Any
]]):
def
generate_input1
(
dims
,
batch
,
attrs
:
List
[
Dict
[
str
,
Any
]]):
if
dims
==
1
:
if
dims
==
1
:
return
np
.
ones
([
32
]).
astype
(
np
.
float32
)
return
np
.
ones
([
32
]).
astype
(
np
.
float32
)
...
@@ -46,52 +44,52 @@ class TrtConvertClipTest(TrtLayerAutoScanTest):
...
@@ -46,52 +44,52 @@ class TrtConvertClipTest(TrtLayerAutoScanTest):
for
dims
in
[
1
,
2
,
3
,
4
]:
for
dims
in
[
1
,
2
,
3
,
4
]:
for
batch
in
[
1
,
4
]:
for
batch
in
[
1
,
4
]:
for
op_inputs
in
[{
for
op_inputs
in
[
"X"
:
[
"input_data"
]
{
"X"
:
[
"input_data"
]},
},
{
{
"X"
:
[
"input_data"
],
"Min"
:
[
"Min_"
],
"Max"
:
[
"Max_"
]},
"X"
:
[
"input_data"
],
]:
"Min"
:
[
"Min_"
],
"Max"
:
[
"Max_"
]
}]:
self
.
input_num
=
len
(
op_inputs
)
self
.
input_num
=
len
(
op_inputs
)
self
.
dims
=
dims
self
.
dims
=
dims
dics
=
[{
dics
=
[
"min"
:
np
.
random
.
uniform
(
1
,
10
),
{
"max"
:
np
.
random
.
uniform
(
10
,
20
)
"min"
:
np
.
random
.
uniform
(
1
,
10
),
},
{
"max"
:
np
.
random
.
uniform
(
10
,
20
),
"op_inputs"
:
op_inputs
}]
ops_config
=
[{
"op_type"
:
"clip"
,
"op_inputs"
:
op_inputs
,
"op_outputs"
:
{
"Out"
:
[
"output_data"
]
},
},
"op_attrs"
:
dics
[
0
]
{
"op_inputs"
:
op_inputs
},
}]
]
ops_config
=
[
{
"op_type"
:
"clip"
,
"op_inputs"
:
op_inputs
,
"op_outputs"
:
{
"Out"
:
[
"output_data"
]},
"op_attrs"
:
dics
[
0
],
}
]
ops
=
self
.
generate_op_config
(
ops_config
)
ops
=
self
.
generate_op_config
(
ops_config
)
program_config
=
ProgramConfig
(
program_config
=
ProgramConfig
(
ops
=
ops
,
ops
=
ops
,
weights
=
{
weights
=
{
"Min_"
:
"Min_"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
generate_weight1
,
dics
)
data_gen
=
partial
(
generate_weight1
,
dics
)
),
),
"Max_"
:
"Max_"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
generate_weight2
,
dics
)
data_gen
=
partial
(
generate_weight2
,
dics
)
)
)
,
},
},
inputs
=
{
inputs
=
{
"input_data"
:
"input_data"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
data_gen
=
partial
(
generate_input1
,
dims
,
batch
,
dics
))
generate_input1
,
dims
,
batch
,
dics
)
)
},
},
outputs
=
[
"output_data"
])
outputs
=
[
"output_data"
],
)
yield
program_config
yield
program_config
def
sample_predictor_configs
(
self
,
program_config
):
def
sample_predictor_configs
(
self
,
program_config
):
def
generate_dynamic_shape
(
attrs
):
def
generate_dynamic_shape
(
attrs
):
if
self
.
dims
==
1
:
if
self
.
dims
==
1
:
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
1
]}
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
1
]}
...
@@ -135,19 +133,23 @@ class TrtConvertClipTest(TrtLayerAutoScanTest):
...
@@ -135,19 +133,23 @@ class TrtConvertClipTest(TrtLayerAutoScanTest):
clear_dynamic_shape
()
clear_dynamic_shape
()
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
1e-5
attrs
,
False
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
1e-5
attrs
,
False
),
1e-3
# for dynamic_shape
# for dynamic_shape
generate_dynamic_shape
(
attrs
)
generate_dynamic_shape
(
attrs
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
1e-5
attrs
,
True
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
1e-5
attrs
,
True
),
1e-3
def
test
(
self
):
def
test
(
self
):
self
.
run_test
()
self
.
run_test
()
...
...
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_concat.py
View file @
dbe08e9b
...
@@ -22,7 +22,6 @@ from typing import Optional, List, Callable, Dict, Any, Set
...
@@ -22,7 +22,6 @@ from typing import Optional, List, Callable, Dict, Any, Set
class
TrtConvertConcatTest
(
TrtLayerAutoScanTest
):
class
TrtConvertConcatTest
(
TrtLayerAutoScanTest
):
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
inputs
=
program_config
.
inputs
inputs
=
program_config
.
inputs
weights
=
program_config
.
weights
weights
=
program_config
.
weights
...
@@ -31,14 +30,13 @@ class TrtConvertConcatTest(TrtLayerAutoScanTest):
...
@@ -31,14 +30,13 @@ class TrtConvertConcatTest(TrtLayerAutoScanTest):
attrs
=
[
attrs
=
[
program_config
.
ops
[
i
].
attrs
for
i
in
range
(
len
(
program_config
.
ops
))
program_config
.
ops
[
i
].
attrs
for
i
in
range
(
len
(
program_config
.
ops
))
]
]
#The input dimension should be less than or equal to the set axis.
#
The input dimension should be less than or equal to the set axis.
if
len
(
inputs
[
'concat_input1'
].
shape
)
<=
attrs
[
0
][
'axis'
]:
if
len
(
inputs
[
'concat_input1'
].
shape
)
<=
attrs
[
0
][
'axis'
]:
return
False
return
False
return
True
return
True
def
sample_program_configs
(
self
):
def
sample_program_configs
(
self
):
def
generate_input1
(
attrs
:
List
[
Dict
[
str
,
Any
]],
batch
):
def
generate_input1
(
attrs
:
List
[
Dict
[
str
,
Any
]],
batch
):
if
self
.
dims
==
4
:
if
self
.
dims
==
4
:
return
np
.
ones
([
batch
,
3
,
24
,
24
]).
astype
(
np
.
float32
)
return
np
.
ones
([
batch
,
3
,
24
,
24
]).
astype
(
np
.
float32
)
...
@@ -79,58 +77,83 @@ class TrtConvertConcatTest(TrtLayerAutoScanTest):
...
@@ -79,58 +77,83 @@ class TrtConvertConcatTest(TrtLayerAutoScanTest):
self
.
num_input
=
num_input
self
.
num_input
=
num_input
self
.
dims
=
dims
self
.
dims
=
dims
dics
=
[{
"axis"
:
axis
},
{}]
dics
=
[{
"axis"
:
axis
},
{}]
dics_intput
=
[{
dics_intput
=
[
"X"
:
{
[
"concat_input1"
,
"concat_input2"
,
"concat_input3"
],
"X"
:
[
"AxisTensor"
:
[
"AxisTensor"
],
"concat_input1"
,
},
{
"concat_input2"
,
"X"
:
"concat_input3"
,
[
"concat_input1"
,
"concat_input2"
,
"concat_input3"
]
],
}]
"AxisTensor"
:
[
"AxisTensor"
],
dics_inputs
=
[{
},
"concat_input1"
:
{
TensorConfig
(
"X"
:
[
data_gen
=
partial
(
generate_input1
,
dics
,
batch
)),
"concat_input1"
,
"concat_input2"
:
"concat_input2"
,
TensorConfig
(
"concat_input3"
,
data_gen
=
partial
(
generate_input2
,
dics
,
batch
)),
]
"concat_input3"
:
},
TensorConfig
(
]
data_gen
=
partial
(
generate_input3
,
dics
,
batch
)),
dics_inputs
=
[
"AxisTensor"
:
{
TensorConfig
(
"concat_input1"
:
TensorConfig
(
data_gen
=
partial
(
generate_weight1
,
dics
))
data_gen
=
partial
(
},
{
generate_input1
,
dics
,
batch
"concat_input1"
:
)
TensorConfig
(
),
data_gen
=
partial
(
generate_input1
,
dics
,
batch
)),
"concat_input2"
:
TensorConfig
(
"concat_input2"
:
data_gen
=
partial
(
TensorConfig
(
generate_input2
,
dics
,
batch
data_gen
=
partial
(
generate_input2
,
dics
,
batch
)),
)
"concat_input3"
:
),
TensorConfig
(
"concat_input3"
:
TensorConfig
(
data_gen
=
partial
(
generate_input3
,
dics
,
batch
))
data_gen
=
partial
(
}]
generate_input3
,
dics
,
batch
ops_config
=
[{
)
"op_type"
:
"concat"
,
),
"op_inputs"
:
dics_intput
[
num_input
],
"AxisTensor"
:
TensorConfig
(
"op_outputs"
:
{
data_gen
=
partial
(
generate_weight1
,
dics
)
"Out"
:
[
"concat_output"
]
),
},
{
"concat_input1"
:
TensorConfig
(
data_gen
=
partial
(
generate_input1
,
dics
,
batch
)
),
"concat_input2"
:
TensorConfig
(
data_gen
=
partial
(
generate_input2
,
dics
,
batch
)
),
"concat_input3"
:
TensorConfig
(
data_gen
=
partial
(
generate_input3
,
dics
,
batch
)
),
},
},
"op_attrs"
:
dics
[
0
]
]
}]
ops_config
=
[
{
"op_type"
:
"concat"
,
"op_inputs"
:
dics_intput
[
num_input
],
"op_outputs"
:
{
"Out"
:
[
"concat_output"
]},
"op_attrs"
:
dics
[
0
],
}
]
ops
=
self
.
generate_op_config
(
ops_config
)
ops
=
self
.
generate_op_config
(
ops_config
)
program_config
=
ProgramConfig
(
program_config
=
ProgramConfig
(
ops
=
ops
,
ops
=
ops
,
weights
=
{},
weights
=
{},
inputs
=
dics_inputs
[
num_input
],
inputs
=
dics_inputs
[
num_input
],
outputs
=
[
"concat_output"
])
outputs
=
[
"concat_output"
],
)
yield
program_config
yield
program_config
def
sample_predictor_configs
(
def
sample_predictor_configs
(
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
def
generate_dynamic_shape
(
attrs
):
def
generate_dynamic_shape
(
attrs
):
if
self
.
num_input
==
0
:
if
self
.
num_input
==
0
:
if
self
.
dims
==
4
:
if
self
.
dims
==
4
:
...
@@ -138,76 +161,76 @@ class TrtConvertConcatTest(TrtLayerAutoScanTest):
...
@@ -138,76 +161,76 @@ class TrtConvertConcatTest(TrtLayerAutoScanTest):
"concat_input1"
:
[
1
,
3
,
24
,
24
],
"concat_input1"
:
[
1
,
3
,
24
,
24
],
"concat_input2"
:
[
1
,
3
,
24
,
24
],
"concat_input2"
:
[
1
,
3
,
24
,
24
],
"concat_input3"
:
[
1
,
3
,
24
,
24
],
"concat_input3"
:
[
1
,
3
,
24
,
24
],
"AxisTensor"
:
[
1
]
"AxisTensor"
:
[
1
]
,
}
}
self
.
dynamic_shape
.
max_input_shape
=
{
self
.
dynamic_shape
.
max_input_shape
=
{
"concat_input1"
:
[
4
,
3
,
48
,
48
],
"concat_input1"
:
[
4
,
3
,
48
,
48
],
"concat_input2"
:
[
4
,
3
,
48
,
48
],
"concat_input2"
:
[
4
,
3
,
48
,
48
],
"concat_input3"
:
[
4
,
3
,
48
,
48
],
"concat_input3"
:
[
4
,
3
,
48
,
48
],
"AxisTensor"
:
[
1
]
"AxisTensor"
:
[
1
]
,
}
}
self
.
dynamic_shape
.
opt_input_shape
=
{
self
.
dynamic_shape
.
opt_input_shape
=
{
"concat_input1"
:
[
1
,
3
,
24
,
24
],
"concat_input1"
:
[
1
,
3
,
24
,
24
],
"concat_input2"
:
[
1
,
3
,
24
,
24
],
"concat_input2"
:
[
1
,
3
,
24
,
24
],
"concat_input3"
:
[
1
,
3
,
24
,
24
],
"concat_input3"
:
[
1
,
3
,
24
,
24
],
"AxisTensor"
:
[
1
]
"AxisTensor"
:
[
1
]
,
}
}
elif
self
.
dims
==
3
:
elif
self
.
dims
==
3
:
self
.
dynamic_shape
.
min_input_shape
=
{
self
.
dynamic_shape
.
min_input_shape
=
{
"concat_input1"
:
[
1
,
3
,
24
],
"concat_input1"
:
[
1
,
3
,
24
],
"concat_input2"
:
[
1
,
3
,
24
],
"concat_input2"
:
[
1
,
3
,
24
],
"concat_input3"
:
[
1
,
3
,
24
],
"concat_input3"
:
[
1
,
3
,
24
],
"AxisTensor"
:
[
1
]
"AxisTensor"
:
[
1
]
,
}
}
self
.
dynamic_shape
.
max_input_shape
=
{
self
.
dynamic_shape
.
max_input_shape
=
{
"concat_input1"
:
[
4
,
12
,
48
],
"concat_input1"
:
[
4
,
12
,
48
],
"concat_input2"
:
[
4
,
12
,
48
],
"concat_input2"
:
[
4
,
12
,
48
],
"concat_input3"
:
[
4
,
12
,
48
],
"concat_input3"
:
[
4
,
12
,
48
],
"AxisTensor"
:
[
1
]
"AxisTensor"
:
[
1
]
,
}
}
self
.
dynamic_shape
.
opt_input_shape
=
{
self
.
dynamic_shape
.
opt_input_shape
=
{
"concat_input1"
:
[
1
,
3
,
24
],
"concat_input1"
:
[
1
,
3
,
24
],
"concat_input2"
:
[
1
,
3
,
24
],
"concat_input2"
:
[
1
,
3
,
24
],
"concat_input3"
:
[
1
,
3
,
24
],
"concat_input3"
:
[
1
,
3
,
24
],
"AxisTensor"
:
[
1
]
"AxisTensor"
:
[
1
]
,
}
}
elif
self
.
dims
==
2
:
elif
self
.
dims
==
2
:
self
.
dynamic_shape
.
min_input_shape
=
{
self
.
dynamic_shape
.
min_input_shape
=
{
"concat_input1"
:
[
1
,
24
],
"concat_input1"
:
[
1
,
24
],
"concat_input2"
:
[
1
,
24
],
"concat_input2"
:
[
1
,
24
],
"concat_input3"
:
[
1
,
24
],
"concat_input3"
:
[
1
,
24
],
"AxisTensor"
:
[
1
]
"AxisTensor"
:
[
1
]
,
}
}
self
.
dynamic_shape
.
max_input_shape
=
{
self
.
dynamic_shape
.
max_input_shape
=
{
"concat_input1"
:
[
4
,
48
],
"concat_input1"
:
[
4
,
48
],
"concat_input2"
:
[
4
,
48
],
"concat_input2"
:
[
4
,
48
],
"concat_input3"
:
[
4
,
48
],
"concat_input3"
:
[
4
,
48
],
"AxisTensor"
:
[
1
]
"AxisTensor"
:
[
1
]
,
}
}
self
.
dynamic_shape
.
opt_input_shape
=
{
self
.
dynamic_shape
.
opt_input_shape
=
{
"concat_input1"
:
[
1
,
24
],
"concat_input1"
:
[
1
,
24
],
"concat_input2"
:
[
1
,
24
],
"concat_input2"
:
[
1
,
24
],
"concat_input3"
:
[
1
,
24
],
"concat_input3"
:
[
1
,
24
],
"AxisTensor"
:
[
1
]
"AxisTensor"
:
[
1
]
,
}
}
elif
self
.
dims
==
1
:
elif
self
.
dims
==
1
:
self
.
dynamic_shape
.
min_input_shape
=
{
self
.
dynamic_shape
.
min_input_shape
=
{
"concat_input1"
:
[
24
],
"concat_input1"
:
[
24
],
"concat_input2"
:
[
24
],
"concat_input2"
:
[
24
],
"concat_input3"
:
[
24
],
"concat_input3"
:
[
24
],
"AxisTensor"
:
[
0
]
"AxisTensor"
:
[
0
]
,
}
}
self
.
dynamic_shape
.
max_input_shape
=
{
self
.
dynamic_shape
.
max_input_shape
=
{
"concat_input1"
:
[
48
],
"concat_input1"
:
[
48
],
"concat_input2"
:
[
48
],
"concat_input2"
:
[
48
],
"concat_input3"
:
[
48
],
"concat_input3"
:
[
48
],
"AxisTensor"
:
[
0
]
"AxisTensor"
:
[
0
]
,
}
}
self
.
dynamic_shape
.
opt_input_shape
=
{
self
.
dynamic_shape
.
opt_input_shape
=
{
"concat_input1"
:
[
24
],
"concat_input1"
:
[
24
],
"concat_input2"
:
[
24
],
"concat_input2"
:
[
24
],
"concat_input3"
:
[
24
],
"concat_input3"
:
[
24
],
"AxisTensor"
:
[
0
]
"AxisTensor"
:
[
0
]
,
}
}
elif
self
.
num_input
==
1
:
elif
self
.
num_input
==
1
:
if
self
.
dims
==
4
:
if
self
.
dims
==
4
:
...
@@ -219,60 +242,60 @@ class TrtConvertConcatTest(TrtLayerAutoScanTest):
...
@@ -219,60 +242,60 @@ class TrtConvertConcatTest(TrtLayerAutoScanTest):
self
.
dynamic_shape
.
max_input_shape
=
{
self
.
dynamic_shape
.
max_input_shape
=
{
"concat_input1"
:
[
4
,
3
,
48
,
48
],
"concat_input1"
:
[
4
,
3
,
48
,
48
],
"concat_input2"
:
[
4
,
3
,
48
,
48
],
"concat_input2"
:
[
4
,
3
,
48
,
48
],
"concat_input3"
:
[
4
,
3
,
48
,
48
]
"concat_input3"
:
[
4
,
3
,
48
,
48
]
,
}
}
self
.
dynamic_shape
.
opt_input_shape
=
{
self
.
dynamic_shape
.
opt_input_shape
=
{
"concat_input1"
:
[
1
,
3
,
24
,
24
],
"concat_input1"
:
[
1
,
3
,
24
,
24
],
"concat_input2"
:
[
1
,
3
,
24
,
24
],
"concat_input2"
:
[
1
,
3
,
24
,
24
],
"concat_input3"
:
[
1
,
3
,
24
,
24
]
"concat_input3"
:
[
1
,
3
,
24
,
24
]
,
}
}
elif
self
.
dims
==
3
:
elif
self
.
dims
==
3
:
self
.
dynamic_shape
.
min_input_shape
=
{
self
.
dynamic_shape
.
min_input_shape
=
{
"concat_input1"
:
[
1
,
3
,
24
],
"concat_input1"
:
[
1
,
3
,
24
],
"concat_input2"
:
[
1
,
3
,
24
],
"concat_input2"
:
[
1
,
3
,
24
],
"concat_input3"
:
[
1
,
3
,
24
]
"concat_input3"
:
[
1
,
3
,
24
]
,
}
}
self
.
dynamic_shape
.
max_input_shape
=
{
self
.
dynamic_shape
.
max_input_shape
=
{
"concat_input1"
:
[
4
,
12
,
48
],
"concat_input1"
:
[
4
,
12
,
48
],
"concat_input2"
:
[
4
,
12
,
48
],
"concat_input2"
:
[
4
,
12
,
48
],
"concat_input3"
:
[
4
,
12
,
48
]
"concat_input3"
:
[
4
,
12
,
48
]
,
}
}
self
.
dynamic_shape
.
opt_input_shape
=
{
self
.
dynamic_shape
.
opt_input_shape
=
{
"concat_input1"
:
[
1
,
3
,
24
],
"concat_input1"
:
[
1
,
3
,
24
],
"concat_input2"
:
[
1
,
3
,
24
],
"concat_input2"
:
[
1
,
3
,
24
],
"concat_input3"
:
[
1
,
3
,
24
]
"concat_input3"
:
[
1
,
3
,
24
]
,
}
}
elif
self
.
dims
==
2
:
elif
self
.
dims
==
2
:
self
.
dynamic_shape
.
min_input_shape
=
{
self
.
dynamic_shape
.
min_input_shape
=
{
"concat_input1"
:
[
1
,
24
],
"concat_input1"
:
[
1
,
24
],
"concat_input2"
:
[
1
,
24
],
"concat_input2"
:
[
1
,
24
],
"concat_input3"
:
[
1
,
24
]
"concat_input3"
:
[
1
,
24
]
,
}
}
self
.
dynamic_shape
.
max_input_shape
=
{
self
.
dynamic_shape
.
max_input_shape
=
{
"concat_input1"
:
[
4
,
48
],
"concat_input1"
:
[
4
,
48
],
"concat_input2"
:
[
4
,
48
],
"concat_input2"
:
[
4
,
48
],
"concat_input3"
:
[
4
,
48
]
"concat_input3"
:
[
4
,
48
]
,
}
}
self
.
dynamic_shape
.
opt_input_shape
=
{
self
.
dynamic_shape
.
opt_input_shape
=
{
"concat_input1"
:
[
1
,
24
],
"concat_input1"
:
[
1
,
24
],
"concat_input2"
:
[
1
,
24
],
"concat_input2"
:
[
1
,
24
],
"concat_input3"
:
[
1
,
24
]
"concat_input3"
:
[
1
,
24
]
,
}
}
elif
self
.
dims
==
1
:
elif
self
.
dims
==
1
:
self
.
dynamic_shape
.
min_input_shape
=
{
self
.
dynamic_shape
.
min_input_shape
=
{
"concat_input1"
:
[
24
],
"concat_input1"
:
[
24
],
"concat_input2"
:
[
24
],
"concat_input2"
:
[
24
],
"concat_input3"
:
[
24
]
"concat_input3"
:
[
24
]
,
}
}
self
.
dynamic_shape
.
max_input_shape
=
{
self
.
dynamic_shape
.
max_input_shape
=
{
"concat_input1"
:
[
48
],
"concat_input1"
:
[
48
],
"concat_input2"
:
[
48
],
"concat_input2"
:
[
48
],
"concat_input3"
:
[
48
]
"concat_input3"
:
[
48
]
,
}
}
self
.
dynamic_shape
.
opt_input_shape
=
{
self
.
dynamic_shape
.
opt_input_shape
=
{
"concat_input1"
:
[
24
],
"concat_input1"
:
[
24
],
"concat_input2"
:
[
24
],
"concat_input2"
:
[
24
],
"concat_input3"
:
[
24
]
"concat_input3"
:
[
24
]
,
}
}
def
clear_dynamic_shape
():
def
clear_dynamic_shape
():
...
@@ -296,29 +319,33 @@ class TrtConvertConcatTest(TrtLayerAutoScanTest):
...
@@ -296,29 +319,33 @@ class TrtConvertConcatTest(TrtLayerAutoScanTest):
clear_dynamic_shape
()
clear_dynamic_shape
()
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
1e-5
attrs
,
False
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
1e-5
attrs
,
False
),
1e-3
# for dynamic_shape
# for dynamic_shape
generate_dynamic_shape
(
attrs
)
generate_dynamic_shape
(
attrs
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
1e-5
attrs
,
True
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
1e-5
attrs
,
True
),
1e-3
def
add_skip_trt_case
(
self
):
def
add_skip_trt_case
(
self
):
def
teller1
(
program_config
,
predictor_config
):
def
teller1
(
program_config
,
predictor_config
):
if
len
(
program_config
.
inputs
)
==
4
:
if
len
(
program_config
.
inputs
)
==
4
:
return
True
return
True
return
False
return
False
self
.
add_skip_case
(
teller1
,
SkipReasons
.
TRT_NOT_SUPPORT
,
self
.
add_skip_case
(
"INPUT AxisTensor NOT SUPPORT"
)
teller1
,
SkipReasons
.
TRT_NOT_SUPPORT
,
"INPUT AxisTensor NOT SUPPORT"
)
def
test
(
self
):
def
test
(
self
):
self
.
add_skip_trt_case
()
self
.
add_skip_trt_case
()
...
...
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_conv2d_transpose.py
View file @
dbe08e9b
...
@@ -22,7 +22,6 @@ from typing import Optional, List, Callable, Dict, Any, Set
...
@@ -22,7 +22,6 @@ from typing import Optional, List, Callable, Dict, Any, Set
class
TrtConvertConv2dTransposeTest
(
TrtLayerAutoScanTest
):
class
TrtConvertConv2dTransposeTest
(
TrtLayerAutoScanTest
):
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
inputs
=
program_config
.
inputs
inputs
=
program_config
.
inputs
weights
=
program_config
.
weights
weights
=
program_config
.
weights
...
@@ -30,8 +29,10 @@ class TrtConvertConv2dTransposeTest(TrtLayerAutoScanTest):
...
@@ -30,8 +29,10 @@ class TrtConvertConv2dTransposeTest(TrtLayerAutoScanTest):
program_config
.
ops
[
i
].
attrs
for
i
in
range
(
len
(
program_config
.
ops
))
program_config
.
ops
[
i
].
attrs
for
i
in
range
(
len
(
program_config
.
ops
))
]
]
if
inputs
[
'input_data'
].
shape
[
if
(
1
]
!=
weights
[
'conv2d_weight'
].
shape
[
1
]
*
attrs
[
0
][
'groups'
]:
inputs
[
'input_data'
].
shape
[
1
]
!=
weights
[
'conv2d_weight'
].
shape
[
1
]
*
attrs
[
0
][
'groups'
]
):
return
False
return
False
if
inputs
[
'input_data'
].
shape
[
1
]
!=
weights
[
'conv2d_weight'
].
shape
[
0
]:
if
inputs
[
'input_data'
].
shape
[
1
]
!=
weights
[
'conv2d_weight'
].
shape
[
0
]:
...
@@ -54,12 +55,13 @@ class TrtConvertConv2dTransposeTest(TrtLayerAutoScanTest):
...
@@ -54,12 +55,13 @@ class TrtConvertConv2dTransposeTest(TrtLayerAutoScanTest):
def
generate_weight1
(
num_channels
,
attrs
:
List
[
Dict
[
str
,
Any
]]):
def
generate_weight1
(
num_channels
,
attrs
:
List
[
Dict
[
str
,
Any
]]):
if
attrs
[
0
][
'groups'
]
==
1
:
if
attrs
[
0
][
'groups'
]
==
1
:
return
np
.
random
.
random
([
num_channels
,
num_channels
,
3
,
return
np
.
random
.
random
(
3
]).
astype
(
np
.
float32
)
[
num_channels
,
num_channels
,
3
,
3
]
).
astype
(
np
.
float32
)
else
:
else
:
return
np
.
random
.
random
(
return
np
.
random
.
random
(
[
num_channels
,
int
(
num_channels
/
2
),
3
,
[
num_channels
,
int
(
num_channels
/
2
),
3
,
3
]
3
]
).
astype
(
np
.
float32
)
).
astype
(
np
.
float32
)
for
num_channels
in
[
2
,
4
,
6
]:
for
num_channels
in
[
2
,
4
,
6
]:
for
batch
in
[
1
,
4
]:
for
batch
in
[
1
,
4
]:
...
@@ -67,99 +69,113 @@ class TrtConvertConv2dTransposeTest(TrtLayerAutoScanTest):
...
@@ -67,99 +69,113 @@ class TrtConvertConv2dTransposeTest(TrtLayerAutoScanTest):
for
paddings
in
[[
0
,
3
],
[
1
,
2
,
3
,
4
]]:
for
paddings
in
[[
0
,
3
],
[
1
,
2
,
3
,
4
]]:
for
groups
in
[
2
]:
for
groups
in
[
2
]:
for
padding_algorithm
in
[
for
padding_algorithm
in
[
'EXPLICIT'
,
'SAME'
,
'VALID'
'EXPLICIT'
,
'SAME'
,
'VALID'
,
]:
]:
for
dilations
in
[[
2
,
2
],
[
1
,
2
]]:
for
dilations
in
[[
2
,
2
],
[
1
,
2
]]:
for
data_format
in
[
'NCHW'
]:
for
data_format
in
[
'NCHW'
]:
self
.
num_channels
=
num_channels
self
.
num_channels
=
num_channels
dics
=
[{
dics
=
[
"data_fromat"
:
data_format
,
{
"dilations"
:
dilations
,
"data_fromat"
:
data_format
,
"padding_algorithm"
:
"dilations"
:
dilations
,
padding_algorithm
,
"padding_algorithm"
:
padding_algorithm
,
"groups"
:
groups
,
"groups"
:
groups
,
"paddings"
:
paddings
,
"paddings"
:
paddings
,
"strides"
:
strides
,
"strides"
:
strides
,
"data_format"
:
data_format
,
"data_format"
:
data_format
,
"output_size"
:
[],
"output_size"
:
[],
"output_padding"
:
[]
"output_padding"
:
[],
}]
}
]
ops_config
=
[{
"op_type"
:
"conv2d_transpose"
,
ops_config
=
[
"op_inputs"
:
{
{
"Input"
:
[
"input_data"
],
"op_type"
:
"conv2d_transpose"
,
"Filter"
:
[
"conv2d_weight"
]
"op_inputs"
:
{
},
"Input"
:
[
"input_data"
],
"op_outputs"
:
{
"Filter"
:
[
"conv2d_weight"
],
"Output"
:
[
"output_data"
]
},
},
"op_outputs"
:
{
"op_attrs"
:
dics
[
0
]
"Output"
:
[
"output_data"
]
}]
},
"op_attrs"
:
dics
[
0
],
}
]
ops
=
self
.
generate_op_config
(
ops
=
self
.
generate_op_config
(
ops_config
)
ops_config
)
program_config
=
ProgramConfig
(
program_config
=
ProgramConfig
(
ops
=
ops
,
ops
=
ops
,
weights
=
{
weights
=
{
"conv2d_weight"
:
"conv2d_weight"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
data_gen
=
partial
(
generate_weight1
,
generate_weight1
,
num_channels
,
dics
))
num_channels
,
dics
,
)
)
},
},
inputs
=
{
inputs
=
{
"input_data"
:
"input_data"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
data_gen
=
partial
(
generate_input1
,
batch
,
generate_input1
,
num_channels
,
dics
))
batch
,
num_channels
,
dics
,
)
)
},
},
outputs
=
[
"output_data"
])
outputs
=
[
"output_data"
],
)
yield
program_config
yield
program_config
def
sample_predictor_configs
(
def
sample_predictor_configs
(
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
def
generate_dynamic_shape
(
attrs
):
def
generate_dynamic_shape
(
attrs
):
if
self
.
num_channels
==
2
:
if
self
.
num_channels
==
2
:
self
.
dynamic_shape
.
min_input_shape
=
{
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
1
,
2
,
32
,
32
],
"input_data"
:
[
1
,
2
,
32
,
32
],
"output_data"
:
[
1
,
24
,
32
,
32
]
"output_data"
:
[
1
,
24
,
32
,
32
]
,
}
}
self
.
dynamic_shape
.
max_input_shape
=
{
self
.
dynamic_shape
.
max_input_shape
=
{
"input_data"
:
[
4
,
2
,
64
,
64
],
"input_data"
:
[
4
,
2
,
64
,
64
],
"output_data"
:
[
4
,
24
,
64
,
64
]
"output_data"
:
[
4
,
24
,
64
,
64
]
,
}
}
self
.
dynamic_shape
.
opt_input_shape
=
{
self
.
dynamic_shape
.
opt_input_shape
=
{
"input_data"
:
[
1
,
2
,
64
,
64
],
"input_data"
:
[
1
,
2
,
64
,
64
],
"output_data"
:
[
1
,
24
,
64
,
64
]
"output_data"
:
[
1
,
24
,
64
,
64
]
,
}
}
elif
self
.
num_channels
==
4
:
elif
self
.
num_channels
==
4
:
self
.
dynamic_shape
.
min_input_shape
=
{
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
1
,
4
,
32
,
32
],
"input_data"
:
[
1
,
4
,
32
,
32
],
"output_data"
:
[
1
,
24
,
32
,
32
]
"output_data"
:
[
1
,
24
,
32
,
32
]
,
}
}
self
.
dynamic_shape
.
max_input_shape
=
{
self
.
dynamic_shape
.
max_input_shape
=
{
"input_data"
:
[
4
,
4
,
64
,
64
],
"input_data"
:
[
4
,
4
,
64
,
64
],
"output_data"
:
[
4
,
24
,
64
,
64
]
"output_data"
:
[
4
,
24
,
64
,
64
]
,
}
}
self
.
dynamic_shape
.
opt_input_shape
=
{
self
.
dynamic_shape
.
opt_input_shape
=
{
"input_data"
:
[
1
,
4
,
64
,
64
],
"input_data"
:
[
1
,
4
,
64
,
64
],
"output_data"
:
[
1
,
24
,
64
,
64
]
"output_data"
:
[
1
,
24
,
64
,
64
]
,
}
}
else
:
else
:
self
.
dynamic_shape
.
min_input_shape
=
{
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
1
,
6
,
32
,
32
],
"input_data"
:
[
1
,
6
,
32
,
32
],
"output_data"
:
[
1
,
24
,
32
,
32
]
"output_data"
:
[
1
,
24
,
32
,
32
]
,
}
}
self
.
dynamic_shape
.
max_input_shape
=
{
self
.
dynamic_shape
.
max_input_shape
=
{
"input_data"
:
[
4
,
6
,
64
,
64
],
"input_data"
:
[
4
,
6
,
64
,
64
],
"output_data"
:
[
4
,
24
,
64
,
64
]
"output_data"
:
[
4
,
24
,
64
,
64
]
,
}
}
self
.
dynamic_shape
.
opt_input_shape
=
{
self
.
dynamic_shape
.
opt_input_shape
=
{
"input_data"
:
[
1
,
6
,
64
,
64
],
"input_data"
:
[
1
,
6
,
64
,
64
],
"output_data"
:
[
1
,
24
,
64
,
64
]
"output_data"
:
[
1
,
24
,
64
,
64
]
,
}
}
def
clear_dynamic_shape
():
def
clear_dynamic_shape
():
...
@@ -178,10 +194,12 @@ class TrtConvertConv2dTransposeTest(TrtLayerAutoScanTest):
...
@@ -178,10 +194,12 @@ class TrtConvertConv2dTransposeTest(TrtLayerAutoScanTest):
clear_dynamic_shape
()
clear_dynamic_shape
()
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
1e-5
attrs
,
False
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
(
1e-5
,
1e-3
)
attrs
,
False
),
(
1e-3
,
1e-3
)
# self.trt_param.precision = paddle_infer.PrecisionType.Int8
# self.trt_param.precision = paddle_infer.PrecisionType.Int8
# yield self.create_inference_config(), generate_trt_nodes_num(
# yield self.create_inference_config(), generate_trt_nodes_num(
# attrs, False), (1e-5, 1e-5)
# attrs, False), (1e-5, 1e-5)
...
@@ -190,24 +208,26 @@ class TrtConvertConv2dTransposeTest(TrtLayerAutoScanTest):
...
@@ -190,24 +208,26 @@ class TrtConvertConv2dTransposeTest(TrtLayerAutoScanTest):
generate_dynamic_shape
(
attrs
)
generate_dynamic_shape
(
attrs
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
1e-5
attrs
,
True
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
(
1e-5
,
1e-3
)
attrs
,
True
),
(
1e-3
,
1e-3
)
# self.trt_param.precision = paddle_infer.PrecisionType.Int8
# self.trt_param.precision = paddle_infer.PrecisionType.Int8
# yield self.create_inference_config(), generate_trt_nodes_num(
# yield self.create_inference_config(), generate_trt_nodes_num(
# attrs, True), (1e-5, 1e-5)
# attrs, True), (1e-5, 1e-5)
def
add_skip_trt_case
(
self
):
def
add_skip_trt_case
(
self
):
def
teller1
(
program_config
,
predictor_config
):
def
teller1
(
program_config
,
predictor_config
):
if
self
.
trt_param
.
precision
==
paddle_infer
.
PrecisionType
.
Int8
:
if
self
.
trt_param
.
precision
==
paddle_infer
.
PrecisionType
.
Int8
:
return
True
return
True
return
False
return
False
self
.
add_skip_case
(
self
.
add_skip_case
(
teller1
,
SkipReasons
.
TRT_NOT_IMPLEMENTED
,
teller1
,
"When precisionType is int8 without relu op, output is different between Trt and Paddle."
SkipReasons
.
TRT_NOT_IMPLEMENTED
,
"When precisionType is int8 without relu op, output is different between Trt and Paddle."
,
)
)
def
test
(
self
):
def
test
(
self
):
...
@@ -221,7 +241,6 @@ class TrtConvertConv2dTransposeTest(TrtLayerAutoScanTest):
...
@@ -221,7 +241,6 @@ class TrtConvertConv2dTransposeTest(TrtLayerAutoScanTest):
# Special case
# Special case
class
TrtConvertConv2dTransposeTest2
(
TrtLayerAutoScanTest
):
class
TrtConvertConv2dTransposeTest2
(
TrtLayerAutoScanTest
):
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
ver
=
paddle_infer
.
get_trt_compile_version
()
ver
=
paddle_infer
.
get_trt_compile_version
()
if
ver
[
0
]
*
1000
+
ver
[
1
]
*
100
+
ver
[
2
]
*
10
<
7000
:
if
ver
[
0
]
*
1000
+
ver
[
1
]
*
100
+
ver
[
2
]
*
10
<
7000
:
...
@@ -241,49 +260,52 @@ class TrtConvertConv2dTransposeTest2(TrtLayerAutoScanTest):
...
@@ -241,49 +260,52 @@ class TrtConvertConv2dTransposeTest2(TrtLayerAutoScanTest):
batch
=
1
batch
=
1
self
.
num_channels
=
num_channels
self
.
num_channels
=
num_channels
dics
=
[{
dics
=
[
"data_fromat"
:
'NCHW'
,
{
"dilations"
:
[
1
,
1
],
"data_fromat"
:
'NCHW'
,
"padding_algorithm"
:
'EXPLICIT'
,
"dilations"
:
[
1
,
1
],
"groups"
:
1
,
"padding_algorithm"
:
'EXPLICIT'
,
"paddings"
:
[
1
,
1
],
"groups"
:
1
,
"strides"
:
[
2
,
2
],
"paddings"
:
[
1
,
1
],
"output_padding"
:
[
1
,
1
],
"strides"
:
[
2
,
2
],
"output_size"
:
[],
"output_padding"
:
[
1
,
1
],
}]
"output_size"
:
[],
}
ops_config
=
[{
]
"op_type"
:
"conv2d_transpose"
,
"op_inputs"
:
{
ops_config
=
[
"Input"
:
[
"input_data"
],
{
"Filter"
:
[
"conv2d_weight"
]
"op_type"
:
"conv2d_transpose"
,
},
"op_inputs"
:
{
"op_outputs"
:
{
"Input"
:
[
"input_data"
],
"Output"
:
[
"output_data"
]
"Filter"
:
[
"conv2d_weight"
],
},
},
"op_attrs"
:
dics
[
0
]
"op_outputs"
:
{
"Output"
:
[
"output_data"
]},
}]
"op_attrs"
:
dics
[
0
],
}
]
ops
=
self
.
generate_op_config
(
ops_config
)
ops
=
self
.
generate_op_config
(
ops_config
)
program_config
=
ProgramConfig
(
program_config
=
ProgramConfig
(
ops
=
ops
,
ops
=
ops
,
weights
=
{
weights
=
{
"conv2d_weight"
:
"conv2d_weight"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
generate_weight1
,
num_channels
,
dics
)
data_gen
=
partial
(
generate_weight1
,
num_channels
,
dics
)
)
)
},
},
inputs
=
{
inputs
=
{
"input_data"
:
"input_data"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
generate_input1
,
batch
,
data_gen
=
partial
(
generate_input1
,
batch
,
num_channels
,
dics
)
num_channels
,
dics
)
)
)
},
},
outputs
=
[
"output_data"
])
outputs
=
[
"output_data"
],
)
yield
program_config
yield
program_config
def
sample_predictor_configs
(
def
sample_predictor_configs
(
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
def
generate_dynamic_shape
(
attrs
):
def
generate_dynamic_shape
(
attrs
):
self
.
dynamic_shape
.
min_input_shape
=
{
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
1
,
128
,
20
,
30
],
"input_data"
:
[
1
,
128
,
20
,
30
],
...
@@ -311,19 +333,23 @@ class TrtConvertConv2dTransposeTest2(TrtLayerAutoScanTest):
...
@@ -311,19 +333,23 @@ class TrtConvertConv2dTransposeTest2(TrtLayerAutoScanTest):
clear_dynamic_shape
()
clear_dynamic_shape
()
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
1e-4
attrs
,
False
),
1e-4
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
(
1e0
,
1e-3
)
attrs
,
False
),
(
1e0
,
1e-3
)
# for dynamic_shape
# for dynamic_shape
generate_dynamic_shape
(
attrs
)
generate_dynamic_shape
(
attrs
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
1e-4
attrs
,
True
),
1e-4
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
(
1e0
,
1e-3
)
attrs
,
True
),
(
1e0
,
1e-3
)
def
add_skip_trt_case
(
self
):
def
add_skip_trt_case
(
self
):
pass
pass
...
...
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_dropout.py
View file @
dbe08e9b
...
@@ -22,12 +22,10 @@ from typing import Optional, List, Callable, Dict, Any, Set
...
@@ -22,12 +22,10 @@ from typing import Optional, List, Callable, Dict, Any, Set
class
TrtConvertDropoutTest
(
TrtLayerAutoScanTest
):
class
TrtConvertDropoutTest
(
TrtLayerAutoScanTest
):
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
return
True
return
True
def
sample_program_configs
(
self
):
def
sample_program_configs
(
self
):
def
generate_input1
(
dims
,
batch
,
attrs
:
List
[
Dict
[
str
,
Any
]]):
def
generate_input1
(
dims
,
batch
,
attrs
:
List
[
Dict
[
str
,
Any
]]):
if
dims
==
1
:
if
dims
==
1
:
return
np
.
ones
([
64
]).
astype
(
np
.
float32
)
return
np
.
ones
([
64
]).
astype
(
np
.
float32
)
...
@@ -42,47 +40,57 @@ class TrtConvertDropoutTest(TrtLayerAutoScanTest):
...
@@ -42,47 +40,57 @@ class TrtConvertDropoutTest(TrtLayerAutoScanTest):
for
batch
in
[
1
,
2
,
4
]:
for
batch
in
[
1
,
2
,
4
]:
for
fix_seed
in
[
False
,
True
]:
for
fix_seed
in
[
False
,
True
]:
for
dropout_implementation
in
[
for
dropout_implementation
in
[
"downgrade_in_infer"
,
"upscale_in_train"
"downgrade_in_infer"
,
"upscale_in_train"
,
]:
]:
for
dropout_prob
in
[
np
.
random
.
random
()]:
for
dropout_prob
in
[
np
.
random
.
random
()]:
for
seed
in
[
0
,
64
,
128
,
512
]:
for
seed
in
[
0
,
64
,
128
,
512
]:
self
.
dims
=
dims
self
.
dims
=
dims
dics
=
[{
dics
=
[
"fix_seed"
:
fix_seed
,
{
"dropout_implementation"
:
"fix_seed"
:
fix_seed
,
dropout_implementation
,
"dropout_implementation"
:
dropout_implementation
,
"dropout_prob"
:
dropout_prob
,
"dropout_prob"
:
dropout_prob
,
"seed"
:
seed
,
"seed"
:
seed
,
"is_test"
:
True
"is_test"
:
True
,
}]
}
]
ops_config
=
[{
"op_type"
:
"dropout"
,
ops_config
=
[
"op_inputs"
:
{
{
"X"
:
[
"input_data"
],
"op_type"
:
"dropout"
,
},
"op_inputs"
:
{
"op_outputs"
:
{
"X"
:
[
"input_data"
],
"Out"
:
[
"dropout_output_data"
]
},
},
"op_outputs"
:
{
"op_attrs"
:
dics
[
0
]
"Out"
:
[
"dropout_output_data"
]
}]
},
"op_attrs"
:
dics
[
0
],
}
]
ops
=
self
.
generate_op_config
(
ops_config
)
ops
=
self
.
generate_op_config
(
ops_config
)
program_config
=
ProgramConfig
(
program_config
=
ProgramConfig
(
ops
=
ops
,
ops
=
ops
,
weights
=
{},
weights
=
{},
inputs
=
{
inputs
=
{
"input_data"
:
"input_data"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
data_gen
=
partial
(
generate_input1
,
dims
,
batch
,
dics
))
generate_input1
,
dims
,
batch
,
dics
,
)
)
},
},
outputs
=
[
"dropout_output_data"
])
outputs
=
[
"dropout_output_data"
],
)
yield
program_config
yield
program_config
def
sample_predictor_configs
(
def
sample_predictor_configs
(
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
def
generate_dynamic_shape
(
attrs
):
def
generate_dynamic_shape
(
attrs
):
if
self
.
dims
==
1
:
if
self
.
dims
==
1
:
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
1
]}
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
1
]}
...
@@ -128,19 +136,23 @@ class TrtConvertDropoutTest(TrtLayerAutoScanTest):
...
@@ -128,19 +136,23 @@ class TrtConvertDropoutTest(TrtLayerAutoScanTest):
clear_dynamic_shape
()
clear_dynamic_shape
()
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
1e-5
attrs
,
False
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
1e-5
attrs
,
False
),
1e-3
# for dynamic_shape
# for dynamic_shape
generate_dynamic_shape
(
attrs
)
generate_dynamic_shape
(
attrs
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
1e-5
attrs
,
True
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
1e-5
attrs
,
True
),
1e-3
def
add_skip_trt_case
(
self
):
def
add_skip_trt_case
(
self
):
pass
pass
...
...
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_elementwise.py
View file @
dbe08e9b
...
@@ -24,12 +24,10 @@ from typing import Optional, List, Callable, Dict, Any, Set
...
@@ -24,12 +24,10 @@ from typing import Optional, List, Callable, Dict, Any, Set
# This is the special test case with weight including batch dimension
# This is the special test case with weight including batch dimension
# I don't want to mess up the code written by others, so I wrote a class specifically
# I don't want to mess up the code written by others, so I wrote a class specifically
class
TrtConvertElementwiseTest_one_input_special_case0
(
TrtLayerAutoScanTest
):
class
TrtConvertElementwiseTest_one_input_special_case0
(
TrtLayerAutoScanTest
):
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
return
True
return
True
def
sample_program_configs
(
self
):
def
sample_program_configs
(
self
):
def
generate_input
(
shape
):
def
generate_input
(
shape
):
return
np
.
random
.
random
(
shape
).
astype
(
np
.
float32
)
return
np
.
random
.
random
(
shape
).
astype
(
np
.
float32
)
...
@@ -39,44 +37,50 @@ class TrtConvertElementwiseTest_one_input_special_case0(TrtLayerAutoScanTest):
...
@@ -39,44 +37,50 @@ class TrtConvertElementwiseTest_one_input_special_case0(TrtLayerAutoScanTest):
for
batch
in
[
1
,
4
]:
for
batch
in
[
1
,
4
]:
for
shape
in
[[
batch
,
32
,
16
,
32
]]:
for
shape
in
[[
batch
,
32
,
16
,
32
]]:
for
op_type
in
[
for
op_type
in
[
"elementwise_add"
,
"elementwise_mul"
,
"elementwise_sub"
,
"elementwise_add"
,
"elementwise_div"
,
"elementwise_pow"
,
"elementwise_min"
,
"elementwise_mul"
,
"elementwise_max"
"elementwise_sub"
,
"elementwise_div"
,
"elementwise_pow"
,
"elementwise_min"
,
"elementwise_max"
,
]:
]:
for
axis
in
[
-
1
]:
for
axis
in
[
-
1
]:
self
.
dims
=
len
(
shape
)
self
.
dims
=
len
(
shape
)
dics
=
[{
"axis"
:
axis
}]
dics
=
[{
"axis"
:
axis
}]
ops_config
=
[
{
ops_config
=
[
"op_type"
:
op_type
,
{
"op_inputs"
:
{
"op_type"
:
op_type
,
"
X"
:
[
"input_data"
],
"
op_inputs"
:
{
"Y"
:
[
"weight
"
]
"X"
:
[
"input_data
"
]
,
}
,
"Y"
:
[
"weight"
]
,
"op_outputs"
:
{
},
"Out"
:
[
"output_data"
]
"op_outputs"
:
{
"Out"
:
[
"output_data"
]
},
}
,
"op_attrs"
:
dics
[
0
]
,
"op_attrs"
:
dics
[
0
]
}
}
]
]
ops
=
self
.
generate_op_config
(
ops_config
)
ops
=
self
.
generate_op_config
(
ops_config
)
program_config
=
ProgramConfig
(
program_config
=
ProgramConfig
(
ops
=
ops
,
ops
=
ops
,
weights
=
{
weights
=
{
"weight"
:
"weight"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
generate_weight
))
data_gen
=
partial
(
generate_weight
)
)
},
},
inputs
=
{
inputs
=
{
"input_data"
:
"input_data"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
generate_input
,
shape
)
data_gen
=
partial
(
generate_input
,
shape
)
),
),
},
},
outputs
=
[
"output_data"
])
outputs
=
[
"output_data"
],
)
yield
program_config
yield
program_config
def
sample_predictor_configs
(
def
sample_predictor_configs
(
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
def
generate_dynamic_shape
(
attrs
):
def
generate_dynamic_shape
(
attrs
):
# The input.dims[1] must be equal to the weight's length.
# The input.dims[1] must be equal to the weight's length.
if
self
.
dims
==
4
:
if
self
.
dims
==
4
:
...
@@ -106,19 +110,23 @@ class TrtConvertElementwiseTest_one_input_special_case0(TrtLayerAutoScanTest):
...
@@ -106,19 +110,23 @@ class TrtConvertElementwiseTest_one_input_special_case0(TrtLayerAutoScanTest):
clear_dynamic_shape
()
clear_dynamic_shape
()
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
(
1e-5
,
1e-5
)
attrs
,
False
),
(
1e-5
,
1e-5
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
(
1e-3
,
1e-3
)
attrs
,
False
),
(
1e-3
,
1e-3
)
# for dynamic_shape
# for dynamic_shape
generate_dynamic_shape
(
attrs
)
generate_dynamic_shape
(
attrs
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
(
1e-5
,
1e-5
)
attrs
,
True
),
(
1e-5
,
1e-5
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
(
1e-3
,
1e-3
)
attrs
,
True
),
(
1e-3
,
1e-3
)
def
add_skip_trt_case
(
self
):
def
add_skip_trt_case
(
self
):
pass
pass
...
@@ -130,12 +138,10 @@ class TrtConvertElementwiseTest_one_input_special_case0(TrtLayerAutoScanTest):
...
@@ -130,12 +138,10 @@ class TrtConvertElementwiseTest_one_input_special_case0(TrtLayerAutoScanTest):
# This is the special test case
# This is the special test case
class
TrtConvertElementwiseTest_one_input_special_case1
(
TrtLayerAutoScanTest
):
class
TrtConvertElementwiseTest_one_input_special_case1
(
TrtLayerAutoScanTest
):
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
return
True
return
True
def
sample_program_configs
(
self
):
def
sample_program_configs
(
self
):
def
generate_input
(
shape
):
def
generate_input
(
shape
):
return
np
.
random
.
random
(
shape
).
astype
(
np
.
float32
)
return
np
.
random
.
random
(
shape
).
astype
(
np
.
float32
)
...
@@ -144,44 +150,47 @@ class TrtConvertElementwiseTest_one_input_special_case1(TrtLayerAutoScanTest):
...
@@ -144,44 +150,47 @@ class TrtConvertElementwiseTest_one_input_special_case1(TrtLayerAutoScanTest):
for
shape
in
[[
32
]]:
for
shape
in
[[
32
]]:
for
op_type
in
[
for
op_type
in
[
"elementwise_add"
,
"elementwise_mul"
,
"elementwise_sub"
,
"elementwise_add"
,
"elementwise_div"
,
"elementwise_pow"
,
"elementwise_min"
,
"elementwise_mul"
,
"elementwise_max"
"elementwise_sub"
,
"elementwise_div"
,
"elementwise_pow"
,
"elementwise_min"
,
"elementwise_max"
,
]:
]:
for
axis
in
[
-
1
]:
for
axis
in
[
-
1
]:
self
.
dims
=
len
(
shape
)
self
.
dims
=
len
(
shape
)
dics
=
[{
"axis"
:
axis
}]
dics
=
[{
"axis"
:
axis
}]
ops_config
=
[{
ops_config
=
[
"op_type"
:
op_type
,
{
"op_inputs"
:
{
"op_type"
:
op_type
,
"X"
:
[
"input_data"
],
"op_inputs"
:
{
"X"
:
[
"input_data"
],
"Y"
:
[
"weight"
]},
"Y"
:
[
"weight"
]
"op_outputs"
:
{
"Out"
:
[
"output_data"
]},
},
"op_attrs"
:
dics
[
0
],
"op_outputs"
:
{
}
"Out"
:
[
"output_data"
]
]
},
"op_attrs"
:
dics
[
0
]
}]
ops
=
self
.
generate_op_config
(
ops_config
)
ops
=
self
.
generate_op_config
(
ops_config
)
program_config
=
ProgramConfig
(
program_config
=
ProgramConfig
(
ops
=
ops
,
ops
=
ops
,
weights
=
{
weights
=
{
"weight"
:
"weight"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
generate_weight
))
data_gen
=
partial
(
generate_weight
)
)
},
},
inputs
=
{
inputs
=
{
"input_data"
:
"input_data"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
generate_input
,
shape
)
data_gen
=
partial
(
generate_input
,
shape
)
),
),
},
},
outputs
=
[
"output_data"
])
outputs
=
[
"output_data"
],
)
yield
program_config
yield
program_config
def
sample_predictor_configs
(
def
sample_predictor_configs
(
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
def
generate_dynamic_shape
(
attrs
):
def
generate_dynamic_shape
(
attrs
):
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
32
]}
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
32
]}
self
.
dynamic_shape
.
max_input_shape
=
{
"input_data"
:
[
64
]}
self
.
dynamic_shape
.
max_input_shape
=
{
"input_data"
:
[
64
]}
...
@@ -205,19 +214,23 @@ class TrtConvertElementwiseTest_one_input_special_case1(TrtLayerAutoScanTest):
...
@@ -205,19 +214,23 @@ class TrtConvertElementwiseTest_one_input_special_case1(TrtLayerAutoScanTest):
clear_dynamic_shape
()
clear_dynamic_shape
()
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
(
1e-5
,
1e-5
)
attrs
,
False
),
(
1e-5
,
1e-5
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
(
1e-3
,
1e-3
)
attrs
,
False
),
(
1e-3
,
1e-3
)
# for dynamic_shape
# for dynamic_shape
generate_dynamic_shape
(
attrs
)
generate_dynamic_shape
(
attrs
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
(
1e-5
,
1e-5
)
attrs
,
True
),
(
1e-5
,
1e-5
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
(
1e-3
,
1e-3
)
attrs
,
True
),
(
1e-3
,
1e-3
)
def
add_skip_trt_case
(
self
):
def
add_skip_trt_case
(
self
):
pass
pass
...
@@ -228,12 +241,10 @@ class TrtConvertElementwiseTest_one_input_special_case1(TrtLayerAutoScanTest):
...
@@ -228,12 +241,10 @@ class TrtConvertElementwiseTest_one_input_special_case1(TrtLayerAutoScanTest):
class
TrtConvertElementwiseTest_one_input
(
TrtLayerAutoScanTest
):
class
TrtConvertElementwiseTest_one_input
(
TrtLayerAutoScanTest
):
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
return
True
return
True
def
sample_program_configs
(
self
):
def
sample_program_configs
(
self
):
def
generate_input
(
shape
):
def
generate_input
(
shape
):
return
np
.
random
.
random
(
shape
).
astype
(
np
.
float32
)
return
np
.
random
.
random
(
shape
).
astype
(
np
.
float32
)
...
@@ -241,47 +252,57 @@ class TrtConvertElementwiseTest_one_input(TrtLayerAutoScanTest):
...
@@ -241,47 +252,57 @@ class TrtConvertElementwiseTest_one_input(TrtLayerAutoScanTest):
return
np
.
random
.
randn
(
32
).
astype
(
np
.
float32
)
return
np
.
random
.
randn
(
32
).
astype
(
np
.
float32
)
for
batch
in
[
1
,
4
]:
for
batch
in
[
1
,
4
]:
for
shape
in
[[
32
],
[
batch
,
32
],
[
batch
,
32
,
32
],
for
shape
in
[
[
batch
,
32
,
16
,
32
]]:
[
32
],
[
batch
,
32
],
[
batch
,
32
,
32
],
[
batch
,
32
,
16
,
32
],
]:
for
op_type
in
[
for
op_type
in
[
"elementwise_add"
,
"elementwise_mul"
,
"elementwise_sub"
,
"elementwise_add"
,
"elementwise_div"
,
"elementwise_pow"
,
"elementwise_min"
,
"elementwise_mul"
,
"elementwise_max"
"elementwise_sub"
,
"elementwise_div"
,
"elementwise_pow"
,
"elementwise_min"
,
"elementwise_max"
,
]:
]:
for
axis
in
[
-
1
if
len
(
shape
)
==
1
else
1
]:
for
axis
in
[
-
1
if
len
(
shape
)
==
1
else
1
]:
self
.
dims
=
len
(
shape
)
self
.
dims
=
len
(
shape
)
dics
=
[{
"axis"
:
axis
}]
dics
=
[{
"axis"
:
axis
}]
ops_config
=
[
{
ops_config
=
[
"op_type"
:
op_type
,
{
"op_inputs"
:
{
"op_type"
:
op_type
,
"
X"
:
[
"input_data"
],
"
op_inputs"
:
{
"Y"
:
[
"weight
"
]
"X"
:
[
"input_data
"
]
,
}
,
"Y"
:
[
"weight"
]
,
"op_outputs"
:
{
},
"Out"
:
[
"output_data"
]
"op_outputs"
:
{
"Out"
:
[
"output_data"
]
},
}
,
"op_attrs"
:
dics
[
0
]
,
"op_attrs"
:
dics
[
0
]
}
}
]
]
ops
=
self
.
generate_op_config
(
ops_config
)
ops
=
self
.
generate_op_config
(
ops_config
)
program_config
=
ProgramConfig
(
program_config
=
ProgramConfig
(
ops
=
ops
,
ops
=
ops
,
weights
=
{
weights
=
{
"weight"
:
"weight"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
generate_weight
))
data_gen
=
partial
(
generate_weight
)
)
},
},
inputs
=
{
inputs
=
{
"input_data"
:
"input_data"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
generate_input
,
shape
)
data_gen
=
partial
(
generate_input
,
shape
)
),
),
},
},
outputs
=
[
"output_data"
])
outputs
=
[
"output_data"
],
)
yield
program_config
yield
program_config
def
sample_predictor_configs
(
def
sample_predictor_configs
(
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
def
generate_dynamic_shape
(
attrs
):
def
generate_dynamic_shape
(
attrs
):
# The input.dims[1] must be equal to the weight's length.
# The input.dims[1] must be equal to the weight's length.
if
self
.
dims
==
1
:
if
self
.
dims
==
1
:
...
@@ -325,19 +346,23 @@ class TrtConvertElementwiseTest_one_input(TrtLayerAutoScanTest):
...
@@ -325,19 +346,23 @@ class TrtConvertElementwiseTest_one_input(TrtLayerAutoScanTest):
clear_dynamic_shape
()
clear_dynamic_shape
()
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
(
1e-5
,
1e-5
)
attrs
,
False
),
(
1e-5
,
1e-5
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
(
1e-3
,
1e-3
)
attrs
,
False
),
(
1e-3
,
1e-3
)
# for dynamic_shape
# for dynamic_shape
generate_dynamic_shape
(
attrs
)
generate_dynamic_shape
(
attrs
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
(
1e-5
,
1e-5
)
attrs
,
True
),
(
1e-5
,
1e-5
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
(
1e-3
,
1e-3
)
attrs
,
True
),
(
1e-3
,
1e-3
)
def
add_skip_trt_case
(
self
):
def
add_skip_trt_case
(
self
):
pass
pass
...
@@ -348,108 +373,112 @@ class TrtConvertElementwiseTest_one_input(TrtLayerAutoScanTest):
...
@@ -348,108 +373,112 @@ class TrtConvertElementwiseTest_one_input(TrtLayerAutoScanTest):
class
TrtConvertElementwiseTest_two_input_without_broadcast
(
class
TrtConvertElementwiseTest_two_input_without_broadcast
(
TrtLayerAutoScanTest
):
TrtLayerAutoScanTest
):
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
return
True
return
True
def
sample_program_configs
(
self
):
def
sample_program_configs
(
self
):
def
generate_input
(
shape
):
def
generate_input
(
shape
):
return
np
.
random
.
random
(
shape
).
astype
(
np
.
float32
)
return
np
.
random
.
random
(
shape
).
astype
(
np
.
float32
)
for
shape
in
[[
4
],
[
4
,
32
],
[
2
,
64
,
32
],
[
1
,
8
,
16
,
32
]]:
for
shape
in
[[
4
],
[
4
,
32
],
[
2
,
32
,
16
],
[
1
,
8
,
16
,
32
]]:
for
op_type
in
[
for
op_type
in
[
"elementwise_add"
,
"elementwise_mul"
,
"elementwise_sub"
,
"elementwise_add"
,
"elementwise_div"
,
"elementwise_pow"
,
"elementwise_min"
,
"elementwise_mul"
,
"elementwise_max"
"elementwise_sub"
,
"elementwise_div"
,
"elementwise_pow"
,
"elementwise_min"
,
"elementwise_max"
,
]:
]:
for
axis
in
[
0
,
-
1
]:
for
axis
in
[
0
,
-
1
]:
self
.
dims
=
len
(
shape
)
self
.
dims
=
len
(
shape
)
dics
=
[{
"axis"
:
axis
}]
dics
=
[{
"axis"
:
axis
}]
ops_config
=
[
{
ops_config
=
[
"op_type"
:
op_type
,
{
"op_inputs"
:
{
"op_type"
:
op_type
,
"
X"
:
[
"input_data1"
],
"
op_inputs"
:
{
"Y
"
:
[
"input_data
2
"
]
"X
"
:
[
"input_data
1
"
]
,
}
,
"Y"
:
[
"input_data2"
]
,
"op_outputs"
:
{
},
"Out"
:
[
"output_data"
]
"op_outputs"
:
{
"Out"
:
[
"output_data"
]
},
}
,
"op_attrs"
:
dics
[
0
]
,
"op_attrs"
:
dics
[
0
]
}
}
]
]
ops
=
self
.
generate_op_config
(
ops_config
)
ops
=
self
.
generate_op_config
(
ops_config
)
program_config
=
ProgramConfig
(
program_config
=
ProgramConfig
(
ops
=
ops
,
ops
=
ops
,
weights
=
{},
weights
=
{},
inputs
=
{
inputs
=
{
"input_data1"
:
"input_data1"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
generate_input
,
shape
)
data_gen
=
partial
(
generate_input
,
shape
)
),
),
"input_data2"
:
"input_data2"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
generate_input
,
shape
)
data_gen
=
partial
(
generate_input
,
shape
)
)
)
,
},
},
outputs
=
[
"output_data"
])
outputs
=
[
"output_data"
],
)
yield
program_config
yield
program_config
def
sample_predictor_configs
(
def
sample_predictor_configs
(
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
def
generate_dynamic_shape
(
attrs
):
def
generate_dynamic_shape
(
attrs
):
if
self
.
dims
==
1
:
if
self
.
dims
==
1
:
self
.
dynamic_shape
.
min_input_shape
=
{
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data1"
:
[
1
],
"input_data1"
:
[
1
],
"input_data2"
:
[
1
]
"input_data2"
:
[
1
]
,
}
}
self
.
dynamic_shape
.
max_input_shape
=
{
self
.
dynamic_shape
.
max_input_shape
=
{
"input_data1"
:
[
128
],
"input_data1"
:
[
128
],
"input_data2"
:
[
128
]
"input_data2"
:
[
128
]
,
}
}
self
.
dynamic_shape
.
opt_input_shape
=
{
self
.
dynamic_shape
.
opt_input_shape
=
{
"input_data1"
:
[
32
],
"input_data1"
:
[
32
],
"input_data2"
:
[
32
]
"input_data2"
:
[
32
]
,
}
}
elif
self
.
dims
==
2
:
elif
self
.
dims
==
2
:
self
.
dynamic_shape
.
min_input_shape
=
{
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data1"
:
[
1
,
4
],
"input_data1"
:
[
1
,
4
],
"input_data2"
:
[
1
,
4
]
"input_data2"
:
[
1
,
4
]
,
}
}
self
.
dynamic_shape
.
max_input_shape
=
{
self
.
dynamic_shape
.
max_input_shape
=
{
"input_data1"
:
[
128
,
256
],
"input_data1"
:
[
128
,
256
],
"input_data2"
:
[
128
,
256
]
"input_data2"
:
[
128
,
256
]
,
}
}
self
.
dynamic_shape
.
opt_input_shape
=
{
self
.
dynamic_shape
.
opt_input_shape
=
{
"input_data1"
:
[
32
,
64
],
"input_data1"
:
[
32
,
64
],
"input_data2"
:
[
32
,
64
]
"input_data2"
:
[
32
,
64
]
,
}
}
elif
self
.
dims
==
3
:
elif
self
.
dims
==
3
:
self
.
dynamic_shape
.
min_input_shape
=
{
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data1"
:
[
1
,
4
,
4
],
"input_data1"
:
[
1
,
4
,
4
],
"input_data2"
:
[
1
,
4
,
4
]
"input_data2"
:
[
1
,
4
,
4
]
,
}
}
self
.
dynamic_shape
.
max_input_shape
=
{
self
.
dynamic_shape
.
max_input_shape
=
{
"input_data1"
:
[
128
,
128
,
256
],
"input_data1"
:
[
128
,
128
,
256
],
"input_data2"
:
[
128
,
128
,
256
]
"input_data2"
:
[
128
,
128
,
256
]
,
}
}
self
.
dynamic_shape
.
opt_input_shape
=
{
self
.
dynamic_shape
.
opt_input_shape
=
{
"input_data1"
:
[
2
,
64
,
64
],
"input_data1"
:
[
2
,
32
,
16
],
"input_data2"
:
[
2
,
64
,
64
]
"input_data2"
:
[
2
,
32
,
16
],
}
}
elif
self
.
dims
==
4
:
elif
self
.
dims
==
4
:
self
.
dynamic_shape
.
min_input_shape
=
{
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data1"
:
[
1
,
4
,
4
,
4
],
"input_data1"
:
[
1
,
4
,
4
,
4
],
"input_data2"
:
[
1
,
4
,
4
,
4
]
"input_data2"
:
[
1
,
4
,
4
,
4
]
,
}
}
self
.
dynamic_shape
.
max_input_shape
=
{
self
.
dynamic_shape
.
max_input_shape
=
{
"input_data1"
:
[
8
,
128
,
64
,
128
],
"input_data1"
:
[
8
,
128
,
64
,
128
],
"input_data2"
:
[
8
,
128
,
64
,
128
]
"input_data2"
:
[
8
,
128
,
64
,
128
]
,
}
}
self
.
dynamic_shape
.
opt_input_shape
=
{
self
.
dynamic_shape
.
opt_input_shape
=
{
"input_data1"
:
[
2
,
64
,
32
,
32
],
"input_data1"
:
[
2
,
64
,
32
,
32
],
"input_data2"
:
[
2
,
64
,
32
,
32
]
"input_data2"
:
[
2
,
64
,
32
,
32
]
,
}
}
def
clear_dynamic_shape
():
def
clear_dynamic_shape
():
...
@@ -470,10 +499,12 @@ class TrtConvertElementwiseTest_two_input_without_broadcast(
...
@@ -470,10 +499,12 @@ class TrtConvertElementwiseTest_two_input_without_broadcast(
clear_dynamic_shape
()
clear_dynamic_shape
()
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
(
1e-5
,
1e-5
)
attrs
,
False
),
(
1e-5
,
1e-5
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
(
1e-3
,
1e-3
)
attrs
,
False
),
(
1e-3
,
1e-3
)
# for dynamic_shape
# for dynamic_shape
generate_dynamic_shape
(
attrs
)
generate_dynamic_shape
(
attrs
)
...
@@ -491,7 +522,6 @@ class TrtConvertElementwiseTest_two_input_without_broadcast(
...
@@ -491,7 +522,6 @@ class TrtConvertElementwiseTest_two_input_without_broadcast(
class
TrtConvertElementwiseTest_two_input_with_broadcast
(
TrtLayerAutoScanTest
):
class
TrtConvertElementwiseTest_two_input_with_broadcast
(
TrtLayerAutoScanTest
):
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
inputs
=
program_config
.
inputs
inputs
=
program_config
.
inputs
if
len
(
inputs
[
'input_data1'
].
shape
)
!=
len
(
inputs
[
'input_data2'
].
shape
):
if
len
(
inputs
[
'input_data1'
].
shape
)
!=
len
(
inputs
[
'input_data2'
].
shape
):
...
@@ -500,7 +530,6 @@ class TrtConvertElementwiseTest_two_input_with_broadcast(TrtLayerAutoScanTest):
...
@@ -500,7 +530,6 @@ class TrtConvertElementwiseTest_two_input_with_broadcast(TrtLayerAutoScanTest):
return
True
return
True
def
sample_program_configs
(
self
):
def
sample_program_configs
(
self
):
def
generate_input
(
shape
):
def
generate_input
(
shape
):
return
np
.
random
.
random
(
shape
).
astype
(
np
.
float32
)
return
np
.
random
.
random
(
shape
).
astype
(
np
.
float32
)
...
@@ -512,8 +541,12 @@ class TrtConvertElementwiseTest_two_input_with_broadcast(TrtLayerAutoScanTest):
...
@@ -512,8 +541,12 @@ class TrtConvertElementwiseTest_two_input_with_broadcast(TrtLayerAutoScanTest):
input2_shape5_list
=
[[
32
],
[
2
,
1
,
32
],
[
4
,
1
,
1
,
32
]]
input2_shape5_list
=
[[
32
],
[
2
,
1
,
32
],
[
4
,
1
,
1
,
32
]]
input2_shape6_list
=
[[
1
,
32
],
[
1
,
32
],
[
1
,
1
,
1
,
32
]]
input2_shape6_list
=
[[
1
,
32
],
[
1
,
32
],
[
1
,
1
,
1
,
32
]]
input2_shape_list
=
[
input2_shape_list
=
[
input2_shape1_list
,
input2_shape2_list
,
input2_shape3_list
,
input2_shape1_list
,
input2_shape4_list
,
input2_shape5_list
,
input2_shape6_list
input2_shape2_list
,
input2_shape3_list
,
input2_shape4_list
,
input2_shape5_list
,
input2_shape6_list
,
]
]
axis1_list
=
[[
-
1
],
[
1
,
-
1
],
[
1
,
-
1
]]
axis1_list
=
[[
-
1
],
[
1
,
-
1
],
[
1
,
-
1
]]
axis2_list
=
[[
-
1
],
[
0
],
[
0
]]
axis2_list
=
[[
-
1
],
[
0
],
[
0
]]
...
@@ -522,8 +555,12 @@ class TrtConvertElementwiseTest_two_input_with_broadcast(TrtLayerAutoScanTest):
...
@@ -522,8 +555,12 @@ class TrtConvertElementwiseTest_two_input_with_broadcast(TrtLayerAutoScanTest):
axis5_list
=
[[
-
1
,
1
],
[
-
1
,
0
],
[
-
1
,
0
]]
axis5_list
=
[[
-
1
,
1
],
[
-
1
,
0
],
[
-
1
,
0
]]
axis6_list
=
[[
-
1
,
0
],
[
-
1
,
1
],
[
-
1
,
0
]]
axis6_list
=
[[
-
1
,
0
],
[
-
1
,
1
],
[
-
1
,
0
]]
axis_list
=
[
axis_list
=
[
axis1_list
,
axis2_list
,
axis3_list
,
axis4_list
,
axis5_list
,
axis1_list
,
axis6_list
axis2_list
,
axis3_list
,
axis4_list
,
axis5_list
,
axis6_list
,
]
]
for
i
in
range
(
3
):
for
i
in
range
(
3
):
...
@@ -531,66 +568,75 @@ class TrtConvertElementwiseTest_two_input_with_broadcast(TrtLayerAutoScanTest):
...
@@ -531,66 +568,75 @@ class TrtConvertElementwiseTest_two_input_with_broadcast(TrtLayerAutoScanTest):
for
j
in
range
(
6
):
for
j
in
range
(
6
):
input2_shape
=
input2_shape_list
[
j
][
i
]
input2_shape
=
input2_shape_list
[
j
][
i
]
for
op_type
in
[
for
op_type
in
[
"elementwise_add"
,
"elementwise_add"
,
"elementwise_mul"
,
"elementwise_mul"
,
"elementwise_sub"
,
"elementwise_sub"
,
"elementwise_div"
,
"elementwise_div"
,
"elementwise_pow"
,
"elementwise_pow"
,
"elementwise_min"
,
"elementwise_min"
,
"elementwise_max"
,
"elementwise_max"
,
]:
]:
for
axis
in
axis_list
[
j
][
i
]:
for
axis
in
axis_list
[
j
][
i
]:
self
.
shape1
=
input1_shape
self
.
shape1
=
input1_shape
self
.
shape2
=
input2_shape
self
.
shape2
=
input2_shape
dics
=
[{
"axis"
:
axis
}]
dics
=
[{
"axis"
:
axis
}]
ops_config
=
[
{
ops_config
=
[
"op_type"
:
op_type
,
{
"op_inputs"
:
{
"op_type"
:
op_type
,
"
X"
:
[
"input_data1"
],
"
op_inputs"
:
{
"Y
"
:
[
"input_data
2
"
]
"X
"
:
[
"input_data
1
"
]
,
}
,
"Y"
:
[
"input_data2"
]
,
"op_outputs"
:
{
},
"Out"
:
[
"output_data"
]
"op_outputs"
:
{
"Out"
:
[
"output_data"
]
},
}
,
"op_attrs"
:
dics
[
0
]
,
"op_attrs"
:
dics
[
0
]
}
}
]
]
ops
=
self
.
generate_op_config
(
ops_config
)
ops
=
self
.
generate_op_config
(
ops_config
)
program_config
=
ProgramConfig
(
program_config
=
ProgramConfig
(
ops
=
ops
,
ops
=
ops
,
weights
=
{},
weights
=
{},
inputs
=
{
inputs
=
{
"input_data1"
:
"input_data1"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
data_gen
=
partial
(
generate_input
,
input1_shape
)),
generate_input
,
input1_shape
"input_data2"
:
)
TensorConfig
(
data_gen
=
partial
(
),
generate_input
,
input2_shape
))
"input_data2"
:
TensorConfig
(
data_gen
=
partial
(
generate_input
,
input2_shape
)
),
},
},
outputs
=
[
"output_data"
])
outputs
=
[
"output_data"
],
)
yield
program_config
yield
program_config
def
sample_predictor_configs
(
def
sample_predictor_configs
(
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
def
generate_dynamic_shape
(
attrs
):
def
generate_dynamic_shape
(
attrs
):
max_shape
=
[[
128
],
[
128
,
128
],
[
128
,
128
,
128
],
max_shape
=
[
[
128
,
128
,
128
,
128
]]
[
128
],
[
128
,
128
],
[
128
,
128
,
128
],
[
128
,
128
,
128
,
128
],
]
min_shape
=
[[
1
],
[
1
,
1
],
[
1
,
1
,
1
],
[
1
,
1
,
1
,
1
]]
min_shape
=
[[
1
],
[
1
,
1
],
[
1
,
1
,
1
],
[
1
,
1
,
1
,
1
]]
opt_shape
=
[[
32
],
[
32
,
32
],
[
32
,
32
,
32
],
[
32
,
32
,
32
,
32
]]
opt_shape
=
[[
32
],
[
32
,
32
],
[
32
,
32
,
32
],
[
32
,
32
,
32
,
32
]]
self
.
dynamic_shape
.
min_input_shape
=
{
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data1"
:
min_shape
[
len
(
self
.
shape1
)
-
1
],
"input_data1"
:
min_shape
[
len
(
self
.
shape1
)
-
1
],
"input_data2"
:
min_shape
[
len
(
self
.
shape2
)
-
1
]
"input_data2"
:
min_shape
[
len
(
self
.
shape2
)
-
1
]
,
}
}
self
.
dynamic_shape
.
max_input_shape
=
{
self
.
dynamic_shape
.
max_input_shape
=
{
"input_data1"
:
max_shape
[
len
(
self
.
shape1
)
-
1
],
"input_data1"
:
max_shape
[
len
(
self
.
shape1
)
-
1
],
"input_data2"
:
max_shape
[
len
(
self
.
shape2
)
-
1
]
"input_data2"
:
max_shape
[
len
(
self
.
shape2
)
-
1
]
,
}
}
self
.
dynamic_shape
.
opt_input_shape
=
{
self
.
dynamic_shape
.
opt_input_shape
=
{
"input_data1"
:
opt_shape
[
len
(
self
.
shape1
)
-
1
],
"input_data1"
:
opt_shape
[
len
(
self
.
shape1
)
-
1
],
"input_data2"
:
opt_shape
[
len
(
self
.
shape2
)
-
1
]
"input_data2"
:
opt_shape
[
len
(
self
.
shape2
)
-
1
]
,
}
}
def
clear_dynamic_shape
():
def
clear_dynamic_shape
():
...
@@ -626,12 +672,10 @@ class TrtConvertElementwiseTest_two_input_with_broadcast(TrtLayerAutoScanTest):
...
@@ -626,12 +672,10 @@ class TrtConvertElementwiseTest_two_input_with_broadcast(TrtLayerAutoScanTest):
class
TrtConvertElementwiseTest_one_input_corner_case
(
TrtLayerAutoScanTest
):
class
TrtConvertElementwiseTest_one_input_corner_case
(
TrtLayerAutoScanTest
):
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
return
True
return
True
def
sample_program_configs
(
self
):
def
sample_program_configs
(
self
):
def
generate_input
(
shape
):
def
generate_input
(
shape
):
return
np
.
random
.
random
(
shape
).
astype
(
np
.
float32
)
return
np
.
random
.
random
(
shape
).
astype
(
np
.
float32
)
...
@@ -640,52 +684,58 @@ class TrtConvertElementwiseTest_one_input_corner_case(TrtLayerAutoScanTest):
...
@@ -640,52 +684,58 @@ class TrtConvertElementwiseTest_one_input_corner_case(TrtLayerAutoScanTest):
return
np
.
random
.
rand
(
32
).
astype
(
np
.
float32
)
return
np
.
random
.
rand
(
32
).
astype
(
np
.
float32
)
for
batch
in
[
1
,
2
,
4
]:
for
batch
in
[
1
,
2
,
4
]:
for
shape
in
[[
32
],
[
batch
,
32
],
[
batch
,
32
,
32
],
for
shape
in
[
[
batch
,
32
,
16
,
32
]]:
[
32
],
[
batch
,
32
],
[
batch
,
32
,
32
],
[
batch
,
32
,
16
,
32
],
]:
for
op_type
in
[
for
op_type
in
[
"elementwise_add"
,
"elementwise_add"
,
"elementwise_mul"
,
"elementwise_mul"
,
"elementwise_sub"
,
"elementwise_sub"
,
"elementwise_div"
,
"elementwise_div"
,
"elementwise_pow"
,
"elementwise_pow"
,
"elementwise_min"
,
"elementwise_min"
,
"elementwise_max"
,
"elementwise_max"
,
]:
]:
self
.
op_type
=
op_type
self
.
op_type
=
op_type
for
axis
in
[
-
1
if
len
(
shape
)
==
1
else
1
]:
for
axis
in
[
-
1
if
len
(
shape
)
==
1
else
1
]:
self
.
dims
=
len
(
shape
)
self
.
dims
=
len
(
shape
)
dics
=
[{
"axis"
:
axis
}]
dics
=
[{
"axis"
:
axis
}]
ops_config
=
[
{
ops_config
=
[
"op_type"
:
op_type
,
{
"op_inputs"
:
{
"op_type"
:
op_type
,
"
X"
:
[
"weight"
],
"
op_inputs"
:
{
"Y"
:
[
"input_data
"
]
"X"
:
[
"weight
"
]
,
}
,
"Y"
:
[
"input_data"
]
,
"op_outputs"
:
{
},
"Out"
:
[
"output_data"
]
"op_outputs"
:
{
"Out"
:
[
"output_data"
]
},
}
,
"op_attrs"
:
dics
[
0
]
,
"op_attrs"
:
dics
[
0
]
}
}
]
]
ops
=
self
.
generate_op_config
(
ops_config
)
ops
=
self
.
generate_op_config
(
ops_config
)
program_config
=
ProgramConfig
(
program_config
=
ProgramConfig
(
ops
=
ops
,
ops
=
ops
,
weights
=
{
weights
=
{
"weight"
:
"weight"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
generate_weight
))
data_gen
=
partial
(
generate_weight
)
)
},
},
inputs
=
{
inputs
=
{
"input_data"
:
"input_data"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
generate_input
,
shape
)
data_gen
=
partial
(
generate_input
,
shape
)
),
),
},
},
outputs
=
[
"output_data"
])
outputs
=
[
"output_data"
],
)
yield
program_config
yield
program_config
def
sample_predictor_configs
(
def
sample_predictor_configs
(
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
def
generate_dynamic_shape
(
attrs
):
def
generate_dynamic_shape
(
attrs
):
# The input.dims[1] must be equal to the weight's length.
# The input.dims[1] must be equal to the weight's length.
if
self
.
dims
==
1
:
if
self
.
dims
==
1
:
...
...
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_equal.py
View file @
dbe08e9b
...
@@ -22,7 +22,6 @@ from typing import Optional, List, Callable, Dict, Any, Set
...
@@ -22,7 +22,6 @@ from typing import Optional, List, Callable, Dict, Any, Set
class
TrtConvertElementwiseTest_one_input_corner_case
(
TrtLayerAutoScanTest
):
class
TrtConvertElementwiseTest_one_input_corner_case
(
TrtLayerAutoScanTest
):
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
attrs
=
[
attrs
=
[
program_config
.
ops
[
i
].
attrs
for
i
in
range
(
len
(
program_config
.
ops
))
program_config
.
ops
[
i
].
attrs
for
i
in
range
(
len
(
program_config
.
ops
))
...
@@ -35,7 +34,6 @@ class TrtConvertElementwiseTest_one_input_corner_case(TrtLayerAutoScanTest):
...
@@ -35,7 +34,6 @@ class TrtConvertElementwiseTest_one_input_corner_case(TrtLayerAutoScanTest):
return
True
return
True
def
sample_program_configs
(
self
):
def
sample_program_configs
(
self
):
def
generate_input
(
shape
):
def
generate_input
(
shape
):
return
np
.
random
.
random
(
shape
).
astype
(
np
.
float32
)
return
np
.
random
.
random
(
shape
).
astype
(
np
.
float32
)
...
@@ -44,86 +42,84 @@ class TrtConvertElementwiseTest_one_input_corner_case(TrtLayerAutoScanTest):
...
@@ -44,86 +42,84 @@ class TrtConvertElementwiseTest_one_input_corner_case(TrtLayerAutoScanTest):
for
axis
in
[
-
1
if
len
(
shape
)
==
1
else
1
]:
for
axis
in
[
-
1
if
len
(
shape
)
==
1
else
1
]:
self
.
dims
=
len
(
shape
)
self
.
dims
=
len
(
shape
)
dics
=
[{
"axis"
:
axis
},
{
"in_dtype"
:
0
,
"out_dtype"
:
5
}]
dics
=
[{
"axis"
:
axis
},
{
"in_dtype"
:
0
,
"out_dtype"
:
5
}]
ops_config
=
[{
ops_config
=
[
"op_type"
:
"equal"
,
{
"op_inputs"
:
{
"op_type"
:
"equal"
,
"X"
:
[
"input_data1"
],
"op_inputs"
:
{
"Y"
:
[
"input_data2"
]
"X"
:
[
"input_data1"
],
},
"Y"
:
[
"input_data2"
],
"op_outputs"
:
{
},
"Out"
:
[
"compare_output_data"
]
"op_outputs"
:
{
"Out"
:
[
"compare_output_data"
]},
"op_attrs"
:
dics
[
0
],
},
},
"op_attrs"
:
dics
[
0
]
{
},
{
"op_type"
:
"cast"
,
"op_type
"
:
"c
ast"
,
"op_inputs"
:
{
"X
"
:
[
"c
ompare_output_data"
]}
,
"op_
in
puts"
:
{
"op_
out
puts"
:
{
"Out"
:
[
"output_data"
]},
"
X"
:
[
"compare_output_data"
]
"
op_attrs"
:
dics
[
1
],
},
},
"op_outputs"
:
{
]
"Out"
:
[
"output_data"
]
},
"op_attrs"
:
dics
[
1
]
}]
ops
=
self
.
generate_op_config
(
ops_config
)
ops
=
self
.
generate_op_config
(
ops_config
)
program_config
=
ProgramConfig
(
program_config
=
ProgramConfig
(
ops
=
ops
,
ops
=
ops
,
weights
=
{},
weights
=
{},
inputs
=
{
inputs
=
{
"input_data1"
:
"input_data1"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
generate_input
,
shape
)
data_gen
=
partial
(
generate_input
,
shape
)
),
),
"input_data2"
:
"input_data2"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
generate_input
,
shape
)
data_gen
=
partial
(
generate_input
,
shape
)
)
)
,
},
},
outputs
=
[
"output_data"
])
outputs
=
[
"output_data"
],
)
yield
program_config
yield
program_config
def
sample_predictor_configs
(
def
sample_predictor_configs
(
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
def
generate_dynamic_shape
(
attrs
):
def
generate_dynamic_shape
(
attrs
):
# The input.dims[1] must be equal to the weight's length.
# The input.dims[1] must be equal to the weight's length.
if
self
.
dims
==
2
:
if
self
.
dims
==
2
:
self
.
dynamic_shape
.
min_input_shape
=
{
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data1"
:
[
1
,
1
],
"input_data1"
:
[
1
,
1
],
"input_data2"
:
[
1
,
1
]
"input_data2"
:
[
1
,
1
]
,
}
}
self
.
dynamic_shape
.
max_input_shape
=
{
self
.
dynamic_shape
.
max_input_shape
=
{
"input_data1"
:
[
4
,
1
],
"input_data1"
:
[
4
,
1
],
"input_data2"
:
[
4
,
1
]
"input_data2"
:
[
4
,
1
]
,
}
}
self
.
dynamic_shape
.
opt_input_shape
=
{
self
.
dynamic_shape
.
opt_input_shape
=
{
"input_data1"
:
[
2
,
1
],
"input_data1"
:
[
2
,
1
],
"input_data2"
:
[
2
,
1
]
"input_data2"
:
[
2
,
1
]
,
}
}
elif
self
.
dims
==
3
:
elif
self
.
dims
==
3
:
self
.
dynamic_shape
.
min_input_shape
=
{
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data1"
:
[
1
,
1
,
4
],
"input_data1"
:
[
1
,
1
,
4
],
"input_data2"
:
[
1
,
1
,
4
]
"input_data2"
:
[
1
,
1
,
4
]
,
}
}
self
.
dynamic_shape
.
max_input_shape
=
{
self
.
dynamic_shape
.
max_input_shape
=
{
"input_data1"
:
[
4
,
1
,
256
],
"input_data1"
:
[
4
,
1
,
256
],
"input_data2"
:
[
1
,
1
,
256
]
"input_data2"
:
[
1
,
1
,
256
]
,
}
}
self
.
dynamic_shape
.
opt_input_shape
=
{
self
.
dynamic_shape
.
opt_input_shape
=
{
"input_data1"
:
[
2
,
1
,
16
],
"input_data1"
:
[
2
,
1
,
16
],
"input_data2"
:
[
2
,
1
,
16
]
"input_data2"
:
[
2
,
1
,
16
]
,
}
}
elif
self
.
dims
==
4
:
elif
self
.
dims
==
4
:
self
.
dynamic_shape
.
min_input_shape
=
{
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data1"
:
[
1
,
1
,
4
,
4
],
"input_data1"
:
[
1
,
1
,
4
,
4
],
"input_data2"
:
[
1
,
1
,
4
,
4
]
"input_data2"
:
[
1
,
1
,
4
,
4
]
,
}
}
self
.
dynamic_shape
.
max_input_shape
=
{
self
.
dynamic_shape
.
max_input_shape
=
{
"input_data1"
:
[
4
,
1
,
128
,
256
],
"input_data1"
:
[
4
,
1
,
128
,
256
],
"input_data2"
:
[
4
,
1
,
128
,
256
]
"input_data2"
:
[
4
,
1
,
128
,
256
]
,
}
}
self
.
dynamic_shape
.
opt_input_shape
=
{
self
.
dynamic_shape
.
opt_input_shape
=
{
"input_data1"
:
[
2
,
1
,
32
,
16
],
"input_data1"
:
[
2
,
1
,
32
,
16
],
"input_data2"
:
[
2
,
1
,
32
,
16
]
"input_data2"
:
[
2
,
1
,
32
,
16
]
,
}
}
def
clear_dynamic_shape
():
def
clear_dynamic_shape
():
...
@@ -144,19 +140,23 @@ class TrtConvertElementwiseTest_one_input_corner_case(TrtLayerAutoScanTest):
...
@@ -144,19 +140,23 @@ class TrtConvertElementwiseTest_one_input_corner_case(TrtLayerAutoScanTest):
clear_dynamic_shape
()
clear_dynamic_shape
()
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
1e-5
attrs
,
False
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
1e-5
attrs
,
False
),
1e-3
# for dynamic_shape
# for dynamic_shape
generate_dynamic_shape
(
attrs
)
generate_dynamic_shape
(
attrs
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
1e-5
attrs
,
True
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
1e-5
attrs
,
True
),
1e-3
def
test
(
self
):
def
test
(
self
):
self
.
run_test
()
self
.
run_test
()
...
...
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_fc.py
View file @
dbe08e9b
...
@@ -23,10 +23,9 @@ import os
...
@@ -23,10 +23,9 @@ import os
class
TrtConvertFcTest
(
TrtLayerAutoScanTest
):
class
TrtConvertFcTest
(
TrtLayerAutoScanTest
):
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
# The output has diff between gpu and trt in CI windows
# The output has diff between gpu and trt in CI windows
if
(
os
.
name
==
'nt'
)
:
if
os
.
name
==
'nt'
:
return
False
return
False
return
True
return
True
...
@@ -34,12 +33,14 @@ class TrtConvertFcTest(TrtLayerAutoScanTest):
...
@@ -34,12 +33,14 @@ class TrtConvertFcTest(TrtLayerAutoScanTest):
self
.
trt_param
.
workspace_size
=
1073741824
self
.
trt_param
.
workspace_size
=
1073741824
def
generate_input1
(
batch
,
attrs
:
List
[
Dict
[
str
,
Any
]]):
def
generate_input1
(
batch
,
attrs
:
List
[
Dict
[
str
,
Any
]]):
return
np
.
random
.
random
([
batch
,
3
,
64
,
(
int
)(
attrs
[
0
][
"m"
]
/
2
),
return
np
.
random
.
random
(
2
]).
astype
(
np
.
float32
)
[
batch
,
3
,
64
,
(
int
)(
attrs
[
0
][
"m"
]
/
2
),
2
]
).
astype
(
np
.
float32
)
def
generate_w
(
batch
,
attrs
:
List
[
Dict
[
str
,
Any
]]):
def
generate_w
(
batch
,
attrs
:
List
[
Dict
[
str
,
Any
]]):
return
np
.
random
.
random
([
attrs
[
0
][
"m"
],
return
np
.
random
.
random
([
attrs
[
0
][
"m"
],
attrs
[
0
][
"n"
]]).
astype
(
attrs
[
0
][
"n"
]]).
astype
(
np
.
float32
)
np
.
float32
)
def
generate_bias
(
batch
,
attrs
:
List
[
Dict
[
str
,
Any
]]):
def
generate_bias
(
batch
,
attrs
:
List
[
Dict
[
str
,
Any
]]):
return
np
.
random
.
random
([
attrs
[
0
][
"n"
]]).
astype
(
np
.
float32
)
return
np
.
random
.
random
([
attrs
[
0
][
"n"
]]).
astype
(
np
.
float32
)
...
@@ -53,7 +54,7 @@ class TrtConvertFcTest(TrtLayerAutoScanTest):
...
@@ -53,7 +54,7 @@ class TrtConvertFcTest(TrtLayerAutoScanTest):
"m"
:
m
,
"m"
:
m
,
"n"
:
n
,
"n"
:
n
,
},
},
{}
{}
,
]
]
ops_config
=
[
ops_config
=
[
...
@@ -62,12 +63,10 @@ class TrtConvertFcTest(TrtLayerAutoScanTest):
...
@@ -62,12 +63,10 @@ class TrtConvertFcTest(TrtLayerAutoScanTest):
"op_inputs"
:
{
"op_inputs"
:
{
"Input"
:
[
"input_data"
],
"Input"
:
[
"input_data"
],
"W"
:
[
"w_data"
],
"W"
:
[
"w_data"
],
"Bias"
:
[
"bias_data"
]
"Bias"
:
[
"bias_data"
],
},
"op_outputs"
:
{
"Out"
:
[
"output_data"
]
},
},
"op_attrs"
:
dics
[
0
]
"op_outputs"
:
{
"Out"
:
[
"output_data"
]},
"op_attrs"
:
dics
[
0
],
},
},
]
]
...
@@ -76,24 +75,26 @@ class TrtConvertFcTest(TrtLayerAutoScanTest):
...
@@ -76,24 +75,26 @@ class TrtConvertFcTest(TrtLayerAutoScanTest):
program_config
=
ProgramConfig
(
program_config
=
ProgramConfig
(
ops
=
ops
,
ops
=
ops
,
weights
=
{
weights
=
{
"w_data"
:
"w_data"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
generate_w
,
batch
,
dics
)),
data_gen
=
partial
(
generate_w
,
batch
,
dics
)
"bias_data"
:
),
TensorConfig
(
"bias_data"
:
TensorConfig
(
data_gen
=
partial
(
generate_bias
,
batch
,
dics
))
data_gen
=
partial
(
generate_bias
,
batch
,
dics
)
),
},
},
inputs
=
{
inputs
=
{
"input_data"
:
"input_data"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
generate_input1
,
batch
,
dics
)
data_gen
=
partial
(
generate_input1
,
batch
,
dics
)
),
),
},
},
outputs
=
[
"output_data"
])
outputs
=
[
"output_data"
],
)
yield
program_config
yield
program_config
def
sample_predictor_configs
(
def
sample_predictor_configs
(
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
def
generate_dynamic_shape
(
attrs
):
def
generate_dynamic_shape
(
attrs
):
self
.
dynamic_shape
.
min_input_shape
=
{
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
1
,
3
,
32
,
16
,
2
],
"input_data"
:
[
1
,
3
,
32
,
16
,
2
],
...
@@ -121,19 +122,23 @@ class TrtConvertFcTest(TrtLayerAutoScanTest):
...
@@ -121,19 +122,23 @@ class TrtConvertFcTest(TrtLayerAutoScanTest):
# clear_dynamic_shape()
# clear_dynamic_shape()
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
1e-5
attrs
,
False
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
(
1e-5
,
1e-5
)
attrs
,
False
),
(
1e-3
,
1e-3
)
# for dynamic_shape
# for dynamic_shape
generate_dynamic_shape
(
attrs
)
generate_dynamic_shape
(
attrs
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
1e-5
attrs
,
True
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
(
1e-5
,
1e-5
)
attrs
,
True
),
(
1e-3
,
1e-3
)
def
test
(
self
):
def
test
(
self
):
self
.
run_test
()
self
.
run_test
()
...
@@ -143,10 +148,9 @@ class TrtConvertFcTest(TrtLayerAutoScanTest):
...
@@ -143,10 +148,9 @@ class TrtConvertFcTest(TrtLayerAutoScanTest):
class
TrtConvertFcTest2
(
TrtLayerAutoScanTest
):
class
TrtConvertFcTest2
(
TrtLayerAutoScanTest
):
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
# The output has diff between gpu and trt in CI windows
# The output has diff between gpu and trt in CI windows
if
(
os
.
name
==
'nt'
)
:
if
os
.
name
==
'nt'
:
return
False
return
False
return
True
return
True
...
@@ -157,8 +161,9 @@ class TrtConvertFcTest2(TrtLayerAutoScanTest):
...
@@ -157,8 +161,9 @@ class TrtConvertFcTest2(TrtLayerAutoScanTest):
return
np
.
random
.
random
([
batch
,
3
,
64
,
14
]).
astype
(
np
.
float32
)
return
np
.
random
.
random
([
batch
,
3
,
64
,
14
]).
astype
(
np
.
float32
)
def
generate_w
(
batch
,
attrs
:
List
[
Dict
[
str
,
Any
]]):
def
generate_w
(
batch
,
attrs
:
List
[
Dict
[
str
,
Any
]]):
return
np
.
random
.
random
([
attrs
[
0
][
"m"
],
return
np
.
random
.
random
([
attrs
[
0
][
"m"
],
attrs
[
0
][
"n"
]]).
astype
(
attrs
[
0
][
"n"
]]).
astype
(
np
.
float32
)
np
.
float32
)
def
generate_bias
(
batch
,
attrs
:
List
[
Dict
[
str
,
Any
]]):
def
generate_bias
(
batch
,
attrs
:
List
[
Dict
[
str
,
Any
]]):
return
np
.
random
.
random
([
attrs
[
0
][
"n"
]]).
astype
(
np
.
float32
)
return
np
.
random
.
random
([
attrs
[
0
][
"n"
]]).
astype
(
np
.
float32
)
...
@@ -172,7 +177,7 @@ class TrtConvertFcTest2(TrtLayerAutoScanTest):
...
@@ -172,7 +177,7 @@ class TrtConvertFcTest2(TrtLayerAutoScanTest):
"m"
:
m
,
"m"
:
m
,
"n"
:
n
,
"n"
:
n
,
},
},
{}
{}
,
]
]
ops_config
=
[
ops_config
=
[
...
@@ -181,12 +186,10 @@ class TrtConvertFcTest2(TrtLayerAutoScanTest):
...
@@ -181,12 +186,10 @@ class TrtConvertFcTest2(TrtLayerAutoScanTest):
"op_inputs"
:
{
"op_inputs"
:
{
"Input"
:
[
"input_data"
],
"Input"
:
[
"input_data"
],
"W"
:
[
"w_data"
],
"W"
:
[
"w_data"
],
"Bias"
:
[
"bias_data"
]
"Bias"
:
[
"bias_data"
]
,
},
},
"op_outputs"
:
{
"op_outputs"
:
{
"Out"
:
[
"output_data"
]},
"Out"
:
[
"output_data"
]
"op_attrs"
:
dics
[
0
],
},
"op_attrs"
:
dics
[
0
]
},
},
]
]
...
@@ -195,24 +198,26 @@ class TrtConvertFcTest2(TrtLayerAutoScanTest):
...
@@ -195,24 +198,26 @@ class TrtConvertFcTest2(TrtLayerAutoScanTest):
program_config
=
ProgramConfig
(
program_config
=
ProgramConfig
(
ops
=
ops
,
ops
=
ops
,
weights
=
{
weights
=
{
"w_data"
:
"w_data"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
generate_w
,
batch
,
dics
)),
data_gen
=
partial
(
generate_w
,
batch
,
dics
)
"bias_data"
:
),
TensorConfig
(
"bias_data"
:
TensorConfig
(
data_gen
=
partial
(
generate_bias
,
batch
,
dics
))
data_gen
=
partial
(
generate_bias
,
batch
,
dics
)
),
},
},
inputs
=
{
inputs
=
{
"input_data"
:
"input_data"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
generate_input1
,
batch
,
dics
)
data_gen
=
partial
(
generate_input1
,
batch
,
dics
)
),
),
},
},
outputs
=
[
"output_data"
])
outputs
=
[
"output_data"
],
)
yield
program_config
yield
program_config
def
sample_predictor_configs
(
def
sample_predictor_configs
(
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
def
generate_dynamic_shape
():
def
generate_dynamic_shape
():
self
.
dynamic_shape
.
min_input_shape
=
{
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
1
,
3
,
32
,
14
],
"input_data"
:
[
1
,
3
,
32
,
14
],
...
@@ -234,14 +239,14 @@ class TrtConvertFcTest2(TrtLayerAutoScanTest):
...
@@ -234,14 +239,14 @@ class TrtConvertFcTest2(TrtLayerAutoScanTest):
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
(
1
,
2
),
1e-5
yield
self
.
create_inference_config
(),
(
1
,
2
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
(
1
,
2
),
(
1e-
5
,
1e-
5
)
yield
self
.
create_inference_config
(),
(
1
,
2
),
(
1e-
3
,
1e-
3
)
# for dynamic_shape
# for dynamic_shape
generate_dynamic_shape
()
generate_dynamic_shape
()
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
(
1
,
2
),
1e-5
yield
self
.
create_inference_config
(),
(
1
,
2
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
(
1
,
2
),
(
1e-
5
,
1e-
5
)
yield
self
.
create_inference_config
(),
(
1
,
2
),
(
1e-
3
,
1e-
3
)
def
test
(
self
):
def
test
(
self
):
self
.
run_test
()
self
.
run_test
()
...
@@ -277,7 +282,7 @@ class TrtConvertFcTest3(TrtLayerAutoScanTest):
...
@@ -277,7 +282,7 @@ class TrtConvertFcTest3(TrtLayerAutoScanTest):
"m"
:
m
,
"m"
:
m
,
"n"
:
n
,
"n"
:
n
,
},
},
{}
{}
,
]
]
ops_config
=
[
ops_config
=
[
...
@@ -286,12 +291,10 @@ class TrtConvertFcTest3(TrtLayerAutoScanTest):
...
@@ -286,12 +291,10 @@ class TrtConvertFcTest3(TrtLayerAutoScanTest):
"op_inputs"
:
{
"op_inputs"
:
{
"Input"
:
[
"input_data"
],
"Input"
:
[
"input_data"
],
"W"
:
[
"w_data"
],
"W"
:
[
"w_data"
],
"Bias"
:
[
"bias_data"
]
"Bias"
:
[
"bias_data"
]
,
},
},
"op_outputs"
:
{
"op_outputs"
:
{
"Out"
:
[
"output_data"
]},
"Out"
:
[
"output_data"
]
"op_attrs"
:
dics
[
0
],
},
"op_attrs"
:
dics
[
0
]
},
},
]
]
...
@@ -300,24 +303,26 @@ class TrtConvertFcTest3(TrtLayerAutoScanTest):
...
@@ -300,24 +303,26 @@ class TrtConvertFcTest3(TrtLayerAutoScanTest):
program_config
=
ProgramConfig
(
program_config
=
ProgramConfig
(
ops
=
ops
,
ops
=
ops
,
weights
=
{
weights
=
{
"w_data"
:
"w_data"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
generate_w
,
batch
,
dics
)),
data_gen
=
partial
(
generate_w
,
batch
,
dics
)
"bias_data"
:
),
TensorConfig
(
"bias_data"
:
TensorConfig
(
data_gen
=
partial
(
generate_bias
,
batch
,
dics
))
data_gen
=
partial
(
generate_bias
,
batch
,
dics
)
),
},
},
inputs
=
{
inputs
=
{
"input_data"
:
"input_data"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
generate_input1
,
batch
,
dics
)
data_gen
=
partial
(
generate_input1
,
batch
,
dics
)
),
),
},
},
outputs
=
[
"output_data"
])
outputs
=
[
"output_data"
],
)
yield
program_config
yield
program_config
def
sample_predictor_configs
(
def
sample_predictor_configs
(
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
def
generate_dynamic_shape
():
def
generate_dynamic_shape
():
self
.
dynamic_shape
.
min_input_shape
=
{
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
1
,
14
,
1
,
2
],
"input_data"
:
[
1
,
14
,
1
,
2
],
...
@@ -339,16 +344,16 @@ class TrtConvertFcTest3(TrtLayerAutoScanTest):
...
@@ -339,16 +344,16 @@ class TrtConvertFcTest3(TrtLayerAutoScanTest):
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
(
1
,
2
),
1e-5
yield
self
.
create_inference_config
(),
(
1
,
2
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
(
1
,
2
),
(
1e-
5
,
1e-
5
)
yield
self
.
create_inference_config
(),
(
1
,
2
),
(
1e-
3
,
1e-
3
)
# for dynamic_shape
# for dynamic_shape
generate_dynamic_shape
()
generate_dynamic_shape
()
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
(
1
,
2
),
1e-5
yield
self
.
create_inference_config
(),
(
1
,
2
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
(
1
,
2
),
(
1e-
5
,
1e-
5
)
yield
self
.
create_inference_config
(),
(
1
,
2
),
(
1e-
3
,
1e-
3
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Int8
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Int8
yield
self
.
create_inference_config
(),
(
1
,
2
),
(
1e-
5
,
1e-
5
)
yield
self
.
create_inference_config
(),
(
1
,
2
),
(
1e-
3
,
1e-
3
)
def
test
(
self
):
def
test
(
self
):
self
.
run_test
()
self
.
run_test
()
...
...
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_fill_constant.py
View file @
dbe08e9b
...
@@ -22,12 +22,10 @@ from typing import Optional, List, Callable, Dict, Any, Set
...
@@ -22,12 +22,10 @@ from typing import Optional, List, Callable, Dict, Any, Set
class
TrtConvertSplitTest
(
TrtLayerAutoScanTest
):
class
TrtConvertSplitTest
(
TrtLayerAutoScanTest
):
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
return
True
return
True
def
sample_program_configs
(
self
):
def
sample_program_configs
(
self
):
def
generate_value_data
(
attrs
:
List
[
Dict
[
str
,
Any
]]):
def
generate_value_data
(
attrs
:
List
[
Dict
[
str
,
Any
]]):
return
np
.
array
([
1
]).
astype
(
np
.
int32
)
return
np
.
array
([
1
]).
astype
(
np
.
int32
)
...
@@ -47,21 +45,28 @@ class TrtConvertSplitTest(TrtLayerAutoScanTest):
...
@@ -47,21 +45,28 @@ class TrtConvertSplitTest(TrtLayerAutoScanTest):
str_value
=
str_value
str_value
=
str_value
else
:
else
:
str_value
=
""
str_value
=
""
dics
=
[{
dics
=
[
"str_value"
:
str_value
,
{
"value"
:
value
,
"str_value"
:
str_value
,
"shape"
:
shape
,
"value"
:
value
,
"dtype"
:
dtype
"shape"
:
shape
,
},
{
"dtype"
:
dtype
,
"axis"
:
-
1
},
}]
{
"axis"
:
-
1
},
dics_intput
=
[{
]
"ValueTensor"
:
[
"value_data"
]
dics_intput
=
[
},
{
{
"ValueTensor"
:
[
"value_data"
]},
"ShapeTensor"
:
[
"shape_data"
],
{
},
{
"ShapeTensor"
:
[
"shape_data"
],
"ShapeTensorList"
:
[
"shapeT1_data"
,
"shapeT2_data"
],
},
},
{}]
{
"ShapeTensorList"
:
[
"shapeT1_data"
,
"shapeT2_data"
,
],
},
{},
]
ops_config
=
[
ops_config
=
[
{
{
"op_type"
:
"fill_constant"
,
"op_type"
:
"fill_constant"
,
...
@@ -69,7 +74,7 @@ class TrtConvertSplitTest(TrtLayerAutoScanTest):
...
@@ -69,7 +74,7 @@ class TrtConvertSplitTest(TrtLayerAutoScanTest):
"op_outputs"
:
{
"op_outputs"
:
{
"Out"
:
[
"out_data"
],
"Out"
:
[
"out_data"
],
},
},
"op_attrs"
:
dics
[
0
]
"op_attrs"
:
dics
[
0
]
,
},
},
]
]
...
@@ -81,26 +86,31 @@ class TrtConvertSplitTest(TrtLayerAutoScanTest):
...
@@ -81,26 +86,31 @@ class TrtConvertSplitTest(TrtLayerAutoScanTest):
ops
=
ops
,
ops
=
ops
,
weights
=
{},
weights
=
{},
inputs
=
{
inputs
=
{
"value_data"
:
"value_data"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
data_gen
=
partial
(
generate_value_data
,
dics
)
generate_value_data
,
dics
)),
),
"shape_data"
:
"shape_data"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
data_gen
=
partial
(
generate_shape_data
,
dics
)
generate_shape_data
,
dics
)),
),
"shapeT1_data"
:
"shapeT1_data"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
data_gen
=
partial
(
generate_shapelist_data
,
dics
)),
generate_shapelist_data
,
dics
"shapeT2_data"
:
)
TensorConfig
(
data_gen
=
partial
(
),
generate_shapelist_data
,
dics
)),
"shapeT2_data"
:
TensorConfig
(
data_gen
=
partial
(
generate_shapelist_data
,
dics
)
),
},
},
outputs
=
[
"out_data"
])
outputs
=
[
"out_data"
],
)
yield
program_config
yield
program_config
def
sample_predictor_configs
(
def
sample_predictor_configs
(
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
def
generate_dynamic_shape
(
attrs
):
def
generate_dynamic_shape
(
attrs
):
self
.
input_shape
=
[
1
,
1
]
self
.
input_shape
=
[
1
,
1
]
max_shape
=
list
(
self
.
input_shape
)
max_shape
=
list
(
self
.
input_shape
)
...
@@ -118,7 +128,7 @@ class TrtConvertSplitTest(TrtLayerAutoScanTest):
...
@@ -118,7 +128,7 @@ class TrtConvertSplitTest(TrtLayerAutoScanTest):
self
.
dynamic_shape
.
opt_input_shape
=
{}
self
.
dynamic_shape
.
opt_input_shape
=
{}
def
generate_trt_nodes_num
(
attrs
,
dynamic_shape
):
def
generate_trt_nodes_num
(
attrs
,
dynamic_shape
):
if
(
self
.
num_input
<
3
)
:
if
self
.
num_input
<
3
:
return
0
,
6
return
0
,
6
return
1
,
5
return
1
,
5
...
@@ -131,10 +141,12 @@ class TrtConvertSplitTest(TrtLayerAutoScanTest):
...
@@ -131,10 +141,12 @@ class TrtConvertSplitTest(TrtLayerAutoScanTest):
generate_dynamic_shape
(
attrs
)
generate_dynamic_shape
(
attrs
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
1e-5
attrs
,
True
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
1e-5
attrs
,
True
),
1e-3
def
add_skip_trt_case
(
self
):
def
add_skip_trt_case
(
self
):
pass
pass
...
...
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_flatten.py
View file @
dbe08e9b
...
@@ -22,16 +22,14 @@ from typing import Optional, List, Callable, Dict, Any, Set
...
@@ -22,16 +22,14 @@ from typing import Optional, List, Callable, Dict, Any, Set
class
TrtConvertFlattenTest_dim_2
(
TrtLayerAutoScanTest
):
class
TrtConvertFlattenTest_dim_2
(
TrtLayerAutoScanTest
):
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
return
True
return
True
def
sample_program_configs
(
self
):
def
sample_program_configs
(
self
):
def
generate_input
(
batch
):
def
generate_input
(
batch
):
return
np
.
random
.
random
([
batch
,
32
]).
astype
(
np
.
float32
)
return
np
.
random
.
random
([
batch
,
32
]).
astype
(
np
.
float32
)
for
batch
in
[
1
,
2
,
4
]:
for
batch
in
[
1
,
4
]:
for
axis
in
[
0
,
1
]:
for
axis
in
[
0
,
1
]:
for
type
in
[
"flatten"
,
"flatten2"
]:
for
type
in
[
"flatten"
,
"flatten2"
]:
if
type
==
"flatten"
:
if
type
==
"flatten"
:
...
@@ -39,34 +37,35 @@ class TrtConvertFlattenTest_dim_2(TrtLayerAutoScanTest):
...
@@ -39,34 +37,35 @@ class TrtConvertFlattenTest_dim_2(TrtLayerAutoScanTest):
else
:
else
:
op_outputs
=
{
op_outputs
=
{
"Out"
:
[
"output_data"
],
"Out"
:
[
"output_data"
],
"XShape"
:
[
"xshape_data"
]
"XShape"
:
[
"xshape_data"
]
,
}
}
dics
=
[{
"axis"
:
axis
}]
dics
=
[{
"axis"
:
axis
}]
ops_config
=
[
{
ops_config
=
[
"op_type"
:
"flatten"
,
{
"op_inputs"
:
{
"op_type"
:
"flatten"
,
"X"
:
[
"input_data"
]
"op_inputs"
:
{
"X"
:
[
"input_data"
]
},
}
,
"op_outputs"
:
op_outputs
,
"op_outputs"
:
op_outputs
,
"op_attrs"
:
dics
[
0
]
,
"op_attrs"
:
dics
[
0
]
}
}
]
]
ops
=
self
.
generate_op_config
(
ops_config
)
ops
=
self
.
generate_op_config
(
ops_config
)
program_config
=
ProgramConfig
(
program_config
=
ProgramConfig
(
ops
=
ops
,
ops
=
ops
,
weights
=
{},
weights
=
{},
inputs
=
{
inputs
=
{
"input_data"
:
"input_data"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
generate_input
,
batch
)
data_gen
=
partial
(
generate_input
,
batch
)
)
)
},
},
outputs
=
[
"output_data"
])
outputs
=
[
"output_data"
],
)
yield
program_config
yield
program_config
def
sample_predictor_configs
(
def
sample_predictor_configs
(
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
def
generate_dynamic_shape
(
attrs
):
def
generate_dynamic_shape
(
attrs
):
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
1
,
8
]}
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
1
,
8
]}
self
.
dynamic_shape
.
max_input_shape
=
{
"input_data"
:
[
4
,
64
]}
self
.
dynamic_shape
.
max_input_shape
=
{
"input_data"
:
[
4
,
64
]}
...
@@ -100,35 +99,37 @@ class TrtConvertFlattenTest_dim_2(TrtLayerAutoScanTest):
...
@@ -100,35 +99,37 @@ class TrtConvertFlattenTest_dim_2(TrtLayerAutoScanTest):
# for static_shape
# for static_shape
clear_dynamic_shape
()
clear_dynamic_shape
()
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
1e-5
attrs
,
False
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
(
1e-3
,
1e-3
)
attrs
,
False
),
(
1e-3
,
1e-3
)
# for dynamic_shape
# for dynamic_shape
generate_dynamic_shape
(
attrs
)
generate_dynamic_shape
(
attrs
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
1e-5
attrs
,
True
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
(
1e-3
,
1e-3
)
attrs
,
True
),
(
1e-3
,
1e-3
)
def
test
(
self
):
def
test
(
self
):
self
.
run_test
()
self
.
run_test
()
class
TrtConvertFlattenTest_dim_3
(
TrtLayerAutoScanTest
):
class
TrtConvertFlattenTest_dim_3
(
TrtLayerAutoScanTest
):
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
return
True
return
True
def
sample_program_configs
(
self
):
def
sample_program_configs
(
self
):
def
generate_input
(
batch
):
def
generate_input
(
batch
):
return
np
.
random
.
random
([
batch
,
32
,
64
]).
astype
(
np
.
float32
)
return
np
.
random
.
random
([
batch
,
32
,
64
]).
astype
(
np
.
float32
)
for
batch
in
[
1
,
2
,
4
]:
for
batch
in
[
1
,
4
]:
for
axis
in
[
0
,
1
,
2
]:
for
axis
in
[
0
,
1
,
2
]:
for
type
in
[
"flatten"
,
"flatten2"
]:
for
type
in
[
"flatten"
,
"flatten2"
]:
if
type
==
"flatten"
:
if
type
==
"flatten"
:
...
@@ -136,38 +137,39 @@ class TrtConvertFlattenTest_dim_3(TrtLayerAutoScanTest):
...
@@ -136,38 +137,39 @@ class TrtConvertFlattenTest_dim_3(TrtLayerAutoScanTest):
else
:
else
:
op_outputs
=
{
op_outputs
=
{
"Out"
:
[
"output_data"
],
"Out"
:
[
"output_data"
],
"XShape"
:
[
"xshape_data"
]
"XShape"
:
[
"xshape_data"
]
,
}
}
dics
=
[{
"axis"
:
axis
}]
dics
=
[{
"axis"
:
axis
}]
ops_config
=
[
{
ops_config
=
[
"op_type"
:
"flatten"
,
{
"op_inputs"
:
{
"op_type"
:
"flatten"
,
"X"
:
[
"input_data"
]
"op_inputs"
:
{
"X"
:
[
"input_data"
]
},
}
,
"op_outputs"
:
op_outputs
,
"op_outputs"
:
op_outputs
,
"op_attrs"
:
dics
[
0
]
,
"op_attrs"
:
dics
[
0
]
}
}
]
]
ops
=
self
.
generate_op_config
(
ops_config
)
ops
=
self
.
generate_op_config
(
ops_config
)
program_config
=
ProgramConfig
(
program_config
=
ProgramConfig
(
ops
=
ops
,
ops
=
ops
,
weights
=
{},
weights
=
{},
inputs
=
{
inputs
=
{
"input_data"
:
"input_data"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
generate_input
,
batch
)
data_gen
=
partial
(
generate_input
,
batch
)
)
)
},
},
outputs
=
[
"output_data"
])
outputs
=
[
"output_data"
],
)
yield
program_config
yield
program_config
def
sample_predictor_configs
(
def
sample_predictor_configs
(
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
def
generate_dynamic_shape
(
attrs
):
def
generate_dynamic_shape
(
attrs
):
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
1
,
8
,
8
]}
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
1
,
8
,
8
]}
self
.
dynamic_shape
.
max_input_shape
=
{
"input_data"
:
[
4
,
64
,
768
]}
self
.
dynamic_shape
.
max_input_shape
=
{
"input_data"
:
[
4
,
32
,
64
]}
self
.
dynamic_shape
.
opt_input_shape
=
{
"input_data"
:
[
2
,
32
,
25
6
]}
self
.
dynamic_shape
.
opt_input_shape
=
{
"input_data"
:
[
2
,
32
,
6
4
]}
def
clear_dynamic_shape
():
def
clear_dynamic_shape
():
self
.
dynamic_shape
.
max_input_shape
=
{}
self
.
dynamic_shape
.
max_input_shape
=
{}
...
@@ -198,35 +200,37 @@ class TrtConvertFlattenTest_dim_3(TrtLayerAutoScanTest):
...
@@ -198,35 +200,37 @@ class TrtConvertFlattenTest_dim_3(TrtLayerAutoScanTest):
clear_dynamic_shape
()
clear_dynamic_shape
()
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
1e-5
attrs
,
False
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
(
1e-3
,
1e-3
)
attrs
,
False
),
(
1e-3
,
1e-3
)
# for dynamic_shape
# for dynamic_shape
generate_dynamic_shape
(
attrs
)
generate_dynamic_shape
(
attrs
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
1e-5
attrs
,
True
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
(
1e-3
,
1e-3
)
attrs
,
True
),
(
1e-3
,
1e-3
)
def
test
(
self
):
def
test
(
self
):
self
.
run_test
()
self
.
run_test
()
class
TrtConvertFlattenTest_dim_4
(
TrtLayerAutoScanTest
):
class
TrtConvertFlattenTest_dim_4
(
TrtLayerAutoScanTest
):
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
return
True
return
True
def
sample_program_configs
(
self
):
def
sample_program_configs
(
self
):
def
generate_input
(
batch
):
def
generate_input
(
batch
):
return
np
.
random
.
random
([
batch
,
8
,
8
,
8
]).
astype
(
np
.
float32
)
return
np
.
random
.
random
([
batch
,
8
,
8
,
8
]).
astype
(
np
.
float32
)
for
batch
in
[
1
,
2
,
4
]:
for
batch
in
[
1
,
4
]:
for
axis
in
[
0
,
1
,
2
,
3
]:
for
axis
in
[
0
,
1
,
2
,
3
]:
for
type
in
[
"flatten"
,
"flatten2"
]:
for
type
in
[
"flatten"
,
"flatten2"
]:
if
type
==
"flatten"
:
if
type
==
"flatten"
:
...
@@ -234,37 +238,38 @@ class TrtConvertFlattenTest_dim_4(TrtLayerAutoScanTest):
...
@@ -234,37 +238,38 @@ class TrtConvertFlattenTest_dim_4(TrtLayerAutoScanTest):
else
:
else
:
op_outputs
=
{
op_outputs
=
{
"Out"
:
[
"output_data"
],
"Out"
:
[
"output_data"
],
"XShape"
:
[
"xshape_data"
]
"XShape"
:
[
"xshape_data"
]
,
}
}
dics
=
[{
"axis"
:
axis
}]
dics
=
[{
"axis"
:
axis
}]
ops_config
=
[
{
ops_config
=
[
"op_type"
:
"flatten"
,
{
"op_inputs"
:
{
"op_type"
:
"flatten"
,
"X"
:
[
"input_data"
]
"op_inputs"
:
{
"X"
:
[
"input_data"
]
},
}
,
"op_outputs"
:
op_outputs
,
"op_outputs"
:
op_outputs
,
"op_attrs"
:
dics
[
0
]
,
"op_attrs"
:
dics
[
0
]
}
}
]
]
ops
=
self
.
generate_op_config
(
ops_config
)
ops
=
self
.
generate_op_config
(
ops_config
)
program_config
=
ProgramConfig
(
program_config
=
ProgramConfig
(
ops
=
ops
,
ops
=
ops
,
weights
=
{},
weights
=
{},
inputs
=
{
inputs
=
{
"input_data"
:
"input_data"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
generate_input
,
batch
)
data_gen
=
partial
(
generate_input
,
batch
)
)
)
},
},
outputs
=
[
"output_data"
])
outputs
=
[
"output_data"
],
)
yield
program_config
yield
program_config
def
sample_predictor_configs
(
def
sample_predictor_configs
(
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
def
generate_dynamic_shape
(
attrs
):
def
generate_dynamic_shape
(
attrs
):
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
1
,
4
,
4
,
4
]}
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
1
,
4
,
4
,
4
]}
self
.
dynamic_shape
.
max_input_shape
=
{
"input_data"
:
[
4
,
32
,
64
,
64
]}
self
.
dynamic_shape
.
max_input_shape
=
{
"input_data"
:
[
4
,
32
,
32
,
32
]}
self
.
dynamic_shape
.
opt_input_shape
=
{
"input_data"
:
[
2
,
16
,
16
,
8
]}
self
.
dynamic_shape
.
opt_input_shape
=
{
"input_data"
:
[
2
,
16
,
16
,
8
]}
def
clear_dynamic_shape
():
def
clear_dynamic_shape
():
...
@@ -294,36 +299,39 @@ class TrtConvertFlattenTest_dim_4(TrtLayerAutoScanTest):
...
@@ -294,36 +299,39 @@ class TrtConvertFlattenTest_dim_4(TrtLayerAutoScanTest):
# for static_shape
# for static_shape
clear_dynamic_shape
()
clear_dynamic_shape
()
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
1e-5
attrs
,
False
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
(
1e-3
,
1e-3
)
attrs
,
False
),
(
1e-3
,
1e-3
)
# for dynamic_shape
# for dynamic_shape
generate_dynamic_shape
(
attrs
)
generate_dynamic_shape
(
attrs
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
1e-5
attrs
,
True
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
(
1e-3
,
1e-3
)
attrs
,
True
),
(
1e-3
,
1e-3
)
def
test
(
self
):
def
test
(
self
):
self
.
run_test
()
self
.
run_test
()
class
TrtConvertFlattenTest_dim_5
(
TrtLayerAutoScanTest
):
class
TrtConvertFlattenTest_dim_5
(
TrtLayerAutoScanTest
):
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
return
True
return
True
def
sample_program_configs
(
self
):
def
sample_program_configs
(
self
):
def
generate_input
(
batch
):
def
generate_input
(
batch
):
return
np
.
random
.
random
([
batch
,
8
,
8
,
8
]).
astype
(
np
.
float32
)
return
np
.
random
.
random
([
batch
,
8
,
8
,
8
]).
astype
(
np
.
float32
)
for
batch
in
[
1
,
2
,
4
]:
for
batch
in
[
1
,
4
]:
for
axis
in
[
0
,
1
,
2
,
3
,
4
]:
for
axis
in
[
0
,
1
,
2
,
3
,
4
]:
for
type
in
[
"flatten"
,
"flatten2"
]:
for
type
in
[
"flatten"
,
"flatten2"
]:
if
type
==
"flatten"
:
if
type
==
"flatten"
:
...
@@ -331,37 +339,38 @@ class TrtConvertFlattenTest_dim_5(TrtLayerAutoScanTest):
...
@@ -331,37 +339,38 @@ class TrtConvertFlattenTest_dim_5(TrtLayerAutoScanTest):
else
:
else
:
op_outputs
=
{
op_outputs
=
{
"Out"
:
[
"output_data"
],
"Out"
:
[
"output_data"
],
"XShape"
:
[
"xshape_data"
]
"XShape"
:
[
"xshape_data"
]
,
}
}
dics
=
[{
"axis"
:
axis
}]
dics
=
[{
"axis"
:
axis
}]
ops_config
=
[
{
ops_config
=
[
"op_type"
:
"flatten"
,
{
"op_inputs"
:
{
"op_type"
:
"flatten"
,
"X"
:
[
"input_data"
]
"op_inputs"
:
{
"X"
:
[
"input_data"
]
},
}
,
"op_outputs"
:
op_outputs
,
"op_outputs"
:
op_outputs
,
"op_attrs"
:
dics
[
0
]
,
"op_attrs"
:
dics
[
0
]
}
}
]
]
ops
=
self
.
generate_op_config
(
ops_config
)
ops
=
self
.
generate_op_config
(
ops_config
)
program_config
=
ProgramConfig
(
program_config
=
ProgramConfig
(
ops
=
ops
,
ops
=
ops
,
weights
=
{},
weights
=
{},
inputs
=
{
inputs
=
{
"input_data"
:
"input_data"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
generate_input
,
batch
)
data_gen
=
partial
(
generate_input
,
batch
)
)
)
},
},
outputs
=
[
"output_data"
])
outputs
=
[
"output_data"
],
)
yield
program_config
yield
program_config
def
sample_predictor_configs
(
def
sample_predictor_configs
(
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
def
generate_dynamic_shape
(
attrs
):
def
generate_dynamic_shape
(
attrs
):
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
1
,
4
,
4
,
4
]}
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
1
,
4
,
4
,
4
]}
self
.
dynamic_shape
.
max_input_shape
=
{
"input_data"
:
[
4
,
32
,
64
,
64
]}
self
.
dynamic_shape
.
max_input_shape
=
{
"input_data"
:
[
4
,
16
,
16
,
8
]}
self
.
dynamic_shape
.
opt_input_shape
=
{
"input_data"
:
[
2
,
16
,
16
,
8
]}
self
.
dynamic_shape
.
opt_input_shape
=
{
"input_data"
:
[
2
,
16
,
16
,
8
]}
def
clear_dynamic_shape
():
def
clear_dynamic_shape
():
...
@@ -391,20 +400,25 @@ class TrtConvertFlattenTest_dim_5(TrtLayerAutoScanTest):
...
@@ -391,20 +400,25 @@ class TrtConvertFlattenTest_dim_5(TrtLayerAutoScanTest):
# for static_shape
# for static_shape
clear_dynamic_shape
()
clear_dynamic_shape
()
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
1e-5
attrs
,
False
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
(
1e-3
,
1e-3
)
attrs
,
False
),
(
1e-3
,
1e-3
)
# for dynamic_shape
# for dynamic_shape
generate_dynamic_shape
(
attrs
)
generate_dynamic_shape
(
attrs
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
1e-5
attrs
,
True
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
(
1e-3
,
1e-3
)
attrs
,
True
),
(
1e-3
,
1e-3
)
def
test
(
self
):
def
test
(
self
):
self
.
run_test
()
self
.
run_test
()
...
...
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gather.py
View file @
dbe08e9b
...
@@ -23,7 +23,6 @@ import unittest
...
@@ -23,7 +23,6 @@ import unittest
class
TrtConvertGatherTest
(
TrtLayerAutoScanTest
):
class
TrtConvertGatherTest
(
TrtLayerAutoScanTest
):
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
inputs
=
program_config
.
inputs
inputs
=
program_config
.
inputs
attrs
=
[
attrs
=
[
...
@@ -35,7 +34,6 @@ class TrtConvertGatherTest(TrtLayerAutoScanTest):
...
@@ -35,7 +34,6 @@ class TrtConvertGatherTest(TrtLayerAutoScanTest):
return
True
return
True
def
sample_program_configs
(
self
):
def
sample_program_configs
(
self
):
def
generate_input1
(
shape
):
def
generate_input1
(
shape
):
return
np
.
random
.
random
(
shape
).
astype
(
np
.
float32
)
return
np
.
random
.
random
(
shape
).
astype
(
np
.
float32
)
...
@@ -52,112 +50,126 @@ class TrtConvertGatherTest(TrtLayerAutoScanTest):
...
@@ -52,112 +50,126 @@ class TrtConvertGatherTest(TrtLayerAutoScanTest):
for
index
in
[[
1
,
4
],
[
4
,
8
]]:
for
index
in
[[
1
,
4
],
[
4
,
8
]]:
for
axis
in
[
0
,
1
,
2
,
3
]:
for
axis
in
[
0
,
1
,
2
,
3
]:
for
overwrite
in
[
True
,
False
]:
for
overwrite
in
[
True
,
False
]:
for
input
in
[{
for
input
in
[
"X"
:
[
"input_data"
],
{
"X"
:
[
"input_data"
],
"Index"
:
[
"index_data"
]},
"Index"
:
[
"index_data"
]
{
},
{
"X"
:
[
"input_data"
],
"X"
:
[
"input_data"
],
"Index"
:
[
"index_data"
],
"Index"
:
[
"index_data"
],
"Axis"
:
[
"axis_data"
]
"Axis"
:
[
"axis_data"
],
}]:
},
]:
for
index_type_int32
in
[
True
,
False
]:
for
index_type_int32
in
[
True
,
False
]:
self
.
shape
=
shape
self
.
shape
=
shape
self
.
axis
=
axis
self
.
axis
=
axis
self
.
input_num
=
len
(
input
)
self
.
input_num
=
len
(
input
)
self
.
index_type_int32
=
index_type_int32
self
.
index_type_int32
=
index_type_int32
dics
=
[{
"overwrite"
:
overwrite
,
"axis"
:
axis
}]
dics
=
[{
"overwrite"
:
overwrite
,
"axis"
:
axis
}]
ops_config
=
[
{
ops_config
=
[
"op_type"
:
"gather"
,
{
"op_inputs"
:
input
,
"op_type"
:
"gather"
,
"op_
out
puts"
:
{
"op_
in
puts"
:
input
,
"Out"
:
[
"output_data"
]
"op_outputs"
:
{
"Out"
:
[
"output_data"
]
},
}
,
"op_attrs"
:
dics
[
0
]
,
"op_attrs"
:
dics
[
0
]
}
}
]
]
ops
=
self
.
generate_op_config
(
ops_config
)
ops
=
self
.
generate_op_config
(
ops_config
)
program_config
=
ProgramConfig
(
program_config
=
ProgramConfig
(
ops
=
ops
,
ops
=
ops
,
weights
=
{},
weights
=
{},
inputs
=
{
inputs
=
{
"input_data"
:
"input_data"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
data_gen
=
partial
(
generate_input1
,
shape
)),
generate_input1
,
shape
"index_data"
:
)
TensorConfig
(
data_gen
=
partial
(
),
generate_input2
"index_data"
:
TensorConfig
(
if
index_type_int32
==
data_gen
=
partial
(
True
else
generate_input4
,
index
)),
generate_input2
}
if
len
(
input
)
==
2
else
{
if
index_type_int32
==
True
"input_data"
:
else
generate_input4
,
TensorConfig
(
data_gen
=
partial
(
index
,
generate_input1
,
shape
)),
)
"index_data"
:
),
TensorConfig
(
data_gen
=
partial
(
}
generate_input2
,
index
)),
if
len
(
input
)
==
2
"axis_data"
:
else
{
TensorConfig
(
data_gen
=
partial
(
"input_data"
:
TensorConfig
(
generate_input3
,
axis
)),
data_gen
=
partial
(
generate_input1
,
shape
)
),
"index_data"
:
TensorConfig
(
data_gen
=
partial
(
generate_input2
,
index
)
),
"axis_data"
:
TensorConfig
(
data_gen
=
partial
(
generate_input3
,
axis
)
),
},
},
outputs
=
[
"output_data"
])
outputs
=
[
"output_data"
],
)
yield
program_config
yield
program_config
def
sample_predictor_configs
(
def
sample_predictor_configs
(
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
def
generate_dynamic_shape
(
attrs
):
def
generate_dynamic_shape
(
attrs
):
if
len
(
self
.
shape
)
==
1
:
if
len
(
self
.
shape
)
==
1
:
self
.
dynamic_shape
.
min_input_shape
=
{
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
4
],
"input_data"
:
[
4
],
"index_data"
:
[
1
]
"index_data"
:
[
1
]
,
}
}
self
.
dynamic_shape
.
max_input_shape
=
{
self
.
dynamic_shape
.
max_input_shape
=
{
"input_data"
:
[
128
],
"input_data"
:
[
128
],
"index_data"
:
[
4
]
"index_data"
:
[
4
]
,
}
}
self
.
dynamic_shape
.
opt_input_shape
=
{
self
.
dynamic_shape
.
opt_input_shape
=
{
"input_data"
:
[
16
],
"input_data"
:
[
16
],
"index_data"
:
[
2
]
"index_data"
:
[
2
]
,
}
}
elif
len
(
self
.
shape
)
==
2
:
elif
len
(
self
.
shape
)
==
2
:
self
.
dynamic_shape
.
min_input_shape
=
{
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
2
,
4
],
"input_data"
:
[
2
,
4
],
"index_data"
:
[
1
]
"index_data"
:
[
1
]
,
}
}
self
.
dynamic_shape
.
max_input_shape
=
{
self
.
dynamic_shape
.
max_input_shape
=
{
"input_data"
:
[
256
,
256
],
"input_data"
:
[
256
,
256
],
"index_data"
:
[
4
]
"index_data"
:
[
4
]
,
}
}
self
.
dynamic_shape
.
opt_input_shape
=
{
self
.
dynamic_shape
.
opt_input_shape
=
{
"input_data"
:
[
64
,
32
],
"input_data"
:
[
64
,
32
],
"index_data"
:
[
2
]
"index_data"
:
[
2
]
,
}
}
elif
len
(
self
.
shape
)
==
3
:
elif
len
(
self
.
shape
)
==
3
:
self
.
dynamic_shape
.
min_input_shape
=
{
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
2
,
4
,
4
],
"input_data"
:
[
2
,
4
,
4
],
"index_data"
:
[
1
]
"index_data"
:
[
1
]
,
}
}
self
.
dynamic_shape
.
max_input_shape
=
{
self
.
dynamic_shape
.
max_input_shape
=
{
"input_data"
:
[
128
,
256
,
256
],
"input_data"
:
[
128
,
256
,
256
],
"index_data"
:
[
4
]
"index_data"
:
[
4
]
,
}
}
self
.
dynamic_shape
.
opt_input_shape
=
{
self
.
dynamic_shape
.
opt_input_shape
=
{
"input_data"
:
[
16
,
64
,
32
],
"input_data"
:
[
16
,
64
,
32
],
"index_data"
:
[
2
]
"index_data"
:
[
2
]
,
}
}
elif
len
(
self
.
shape
)
==
4
:
elif
len
(
self
.
shape
)
==
4
:
self
.
dynamic_shape
.
min_input_shape
=
{
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
2
,
4
,
4
,
2
],
"input_data"
:
[
2
,
4
,
4
,
2
],
"index_data"
:
[
1
]
"index_data"
:
[
1
]
,
}
}
self
.
dynamic_shape
.
max_input_shape
=
{
self
.
dynamic_shape
.
max_input_shape
=
{
"input_data"
:
[
128
,
256
,
64
,
128
],
"input_data"
:
[
128
,
256
,
64
,
128
],
"index_data"
:
[
4
]
"index_data"
:
[
4
]
,
}
}
self
.
dynamic_shape
.
opt_input_shape
=
{
self
.
dynamic_shape
.
opt_input_shape
=
{
"input_data"
:
[
16
,
64
,
16
,
32
],
"input_data"
:
[
16
,
64
,
16
,
32
],
"index_data"
:
[
2
]
"index_data"
:
[
2
]
,
}
}
def
clear_dynamic_shape
():
def
clear_dynamic_shape
():
...
@@ -182,10 +194,12 @@ class TrtConvertGatherTest(TrtLayerAutoScanTest):
...
@@ -182,10 +194,12 @@ class TrtConvertGatherTest(TrtLayerAutoScanTest):
clear_dynamic_shape
()
clear_dynamic_shape
()
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
False
),
1e-5
False
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
False
),
1e-5
False
),
1e-3
# for dynamic_shape
# for dynamic_shape
generate_dynamic_shape
(
attrs
)
generate_dynamic_shape
(
attrs
)
...
@@ -201,14 +215,17 @@ class TrtConvertGatherTest(TrtLayerAutoScanTest):
...
@@ -201,14 +215,17 @@ class TrtConvertGatherTest(TrtLayerAutoScanTest):
def
teller1
(
program_config
,
predictor_config
):
def
teller1
(
program_config
,
predictor_config
):
if
len
(
self
.
dynamic_shape
.
min_input_shape
)
!=
0
:
if
len
(
self
.
dynamic_shape
.
min_input_shape
)
!=
0
:
inputs
=
program_config
.
inputs
inputs
=
program_config
.
inputs
if
len
(
inputs
[
'input_data'
].
shape
)
==
1
or
len
(
if
(
inputs
[
'index_data'
].
shape
)
==
1
:
len
(
inputs
[
'input_data'
].
shape
)
==
1
or
len
(
inputs
[
'index_data'
].
shape
)
==
1
):
return
True
return
True
return
False
return
False
self
.
add_skip_case
(
self
.
add_skip_case
(
teller1
,
SkipReasons
.
TRT_NOT_SUPPORT
,
teller1
,
"Need to repair the case: trt reshape out failed for dynamic shape mode when inputs' dims==1. under trt7.0 "
SkipReasons
.
TRT_NOT_SUPPORT
,
"Need to repair the case: trt reshape out failed for dynamic shape mode when inputs' dims==1. under trt7.0 "
,
)
)
def
test
(
self
):
def
test
(
self
):
...
...
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gather_nd.py
View file @
dbe08e9b
...
@@ -23,7 +23,6 @@ import os
...
@@ -23,7 +23,6 @@ import os
class
TrtConvertGatherNdTest_dim_4_1
(
TrtLayerAutoScanTest
):
class
TrtConvertGatherNdTest_dim_4_1
(
TrtLayerAutoScanTest
):
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
# The output has diff between gpu and trt in CI windows
# The output has diff between gpu and trt in CI windows
# if ( and self.trt_param.precision == paddle_infer.PrecisionType.Half):
# if ( and self.trt_param.precision == paddle_infer.PrecisionType.Half):
...
@@ -31,54 +30,53 @@ class TrtConvertGatherNdTest_dim_4_1(TrtLayerAutoScanTest):
...
@@ -31,54 +30,53 @@ class TrtConvertGatherNdTest_dim_4_1(TrtLayerAutoScanTest):
return
True
return
True
def
sample_program_configs
(
self
):
def
sample_program_configs
(
self
):
def
generate_input1
():
def
generate_input1
():
return
np
.
random
.
random
([
2
,
32
,
64
,
64
]).
astype
(
np
.
float32
)
return
np
.
random
.
random
([
2
,
32
,
64
,
64
]).
astype
(
np
.
float32
)
def
generate_input2
():
def
generate_input2
():
return
np
.
ones
([
1
]).
astype
(
np
.
int32
)
return
np
.
ones
([
1
]).
astype
(
np
.
int32
)
ops_config
=
[{
ops_config
=
[
"op_type"
:
"gather_nd"
,
{
"op_inputs"
:
{
"op_type"
:
"gather_nd"
,
"X"
:
[
"input_data"
],
"op_inputs"
:
{
"X"
:
[
"input_data"
],
"Index"
:
[
"index_data"
]},
"Index"
:
[
"index_data"
]
"op_outputs"
:
{
"Out"
:
[
"output_data"
]},
},
"op_attrs"
:
{},
"op_outputs"
:
{
}
"Out"
:
[
"output_data"
]
]
},
"op_attrs"
:
{}
}]
ops
=
self
.
generate_op_config
(
ops_config
)
ops
=
self
.
generate_op_config
(
ops_config
)
for
i
in
range
(
10
):
for
i
in
range
(
10
):
program_config
=
ProgramConfig
(
program_config
=
ProgramConfig
(
ops
=
ops
,
ops
=
ops
,
weights
=
{},
weights
=
{},
inputs
=
{
inputs
=
{
"input_data"
:
"input_data"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
generate_input1
)),
data_gen
=
partial
(
generate_input1
)
"index_data"
:
),
TensorConfig
(
data_gen
=
partial
(
generate_input2
)),
"index_data"
:
TensorConfig
(
data_gen
=
partial
(
generate_input2
)
),
},
},
outputs
=
[
"output_data"
])
outputs
=
[
"output_data"
],
)
yield
program_config
yield
program_config
def
sample_predictor_configs
(
def
sample_predictor_configs
(
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
def
generate_dynamic_shape
(
attrs
):
def
generate_dynamic_shape
(
attrs
):
self
.
dynamic_shape
.
min_input_shape
=
{
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
1
,
8
,
8
,
8
],
"input_data"
:
[
1
,
8
,
8
,
8
],
"index_data"
:
[
1
]
"index_data"
:
[
1
]
,
}
}
self
.
dynamic_shape
.
max_input_shape
=
{
self
.
dynamic_shape
.
max_input_shape
=
{
"input_data"
:
[
4
,
32
,
64
,
64
],
"input_data"
:
[
4
,
32
,
64
,
64
],
"index_data"
:
[
1
]
"index_data"
:
[
1
]
,
}
}
self
.
dynamic_shape
.
opt_input_shape
=
{
self
.
dynamic_shape
.
opt_input_shape
=
{
"input_data"
:
[
2
,
32
,
64
,
64
],
"input_data"
:
[
2
,
32
,
64
,
64
],
"index_data"
:
[
1
]
"index_data"
:
[
1
]
,
}
}
def
clear_dynamic_shape
():
def
clear_dynamic_shape
():
...
@@ -95,25 +93,26 @@ class TrtConvertGatherNdTest_dim_4_1(TrtLayerAutoScanTest):
...
@@ -95,25 +93,26 @@ class TrtConvertGatherNdTest_dim_4_1(TrtLayerAutoScanTest):
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
(
0
,
4
),
1e-5
yield
self
.
create_inference_config
(),
(
0
,
4
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
(
0
,
4
),
1e-
5
yield
self
.
create_inference_config
(),
(
0
,
4
),
1e-
3
# for dynamic_shape
# for dynamic_shape
generate_dynamic_shape
(
attrs
)
generate_dynamic_shape
(
attrs
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
(
1
,
3
),
1e-5
yield
self
.
create_inference_config
(),
(
1
,
3
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
(
1
,
3
),
1e-
5
yield
self
.
create_inference_config
(),
(
1
,
3
),
1e-
3
def
add_skip_trt_case
(
self
):
def
add_skip_trt_case
(
self
):
def
teller1
(
program_config
,
predictor_config
):
def
teller1
(
program_config
,
predictor_config
):
if
len
(
self
.
dynamic_shape
.
min_input_shape
)
!=
0
and
os
.
name
==
'nt'
:
if
len
(
self
.
dynamic_shape
.
min_input_shape
)
!=
0
and
os
.
name
==
'nt'
:
return
True
return
True
return
False
return
False
self
.
add_skip_case
(
self
.
add_skip_case
(
teller1
,
SkipReasons
.
TRT_NOT_SUPPORT
,
teller1
,
"Under Windows Ci, this case will sporadically fail."
)
SkipReasons
.
TRT_NOT_SUPPORT
,
"Under Windows Ci, this case will sporadically fail."
,
)
def
test
(
self
):
def
test
(
self
):
self
.
add_skip_trt_case
()
self
.
add_skip_trt_case
()
...
@@ -121,29 +120,24 @@ class TrtConvertGatherNdTest_dim_4_1(TrtLayerAutoScanTest):
...
@@ -121,29 +120,24 @@ class TrtConvertGatherNdTest_dim_4_1(TrtLayerAutoScanTest):
class
TrtConvertGatherNdTest_dim_4_1_2
(
TrtLayerAutoScanTest
):
class
TrtConvertGatherNdTest_dim_4_1_2
(
TrtLayerAutoScanTest
):
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
return
True
return
True
def
sample_program_configs
(
self
):
def
sample_program_configs
(
self
):
def
generate_input1
():
def
generate_input1
():
return
np
.
random
.
random
([
2
,
32
,
64
,
64
]).
astype
(
np
.
float32
)
return
np
.
random
.
random
([
2
,
32
,
64
,
64
]).
astype
(
np
.
float32
)
def
generate_input2
():
def
generate_input2
():
return
np
.
array
([
1
,
2
]).
astype
(
np
.
int32
)
return
np
.
array
([
1
,
2
]).
astype
(
np
.
int32
)
ops_config
=
[{
ops_config
=
[
"op_type"
:
"gather_nd"
,
{
"op_inputs"
:
{
"op_type"
:
"gather_nd"
,
"X"
:
[
"input_data"
],
"op_inputs"
:
{
"X"
:
[
"input_data"
],
"Index"
:
[
"index_data"
]},
"Index"
:
[
"index_data"
]
"op_outputs"
:
{
"Out"
:
[
"output_data"
]},
},
"op_attrs"
:
{},
"op_outputs"
:
{
}
"Out"
:
[
"output_data"
]
]
},
"op_attrs"
:
{}
}]
ops
=
self
.
generate_op_config
(
ops_config
)
ops
=
self
.
generate_op_config
(
ops_config
)
program_config
=
ProgramConfig
(
program_config
=
ProgramConfig
(
...
@@ -153,25 +147,26 @@ class TrtConvertGatherNdTest_dim_4_1_2(TrtLayerAutoScanTest):
...
@@ -153,25 +147,26 @@ class TrtConvertGatherNdTest_dim_4_1_2(TrtLayerAutoScanTest):
"input_data"
:
TensorConfig
(
data_gen
=
partial
(
generate_input1
)),
"input_data"
:
TensorConfig
(
data_gen
=
partial
(
generate_input1
)),
"index_data"
:
TensorConfig
(
data_gen
=
partial
(
generate_input2
)),
"index_data"
:
TensorConfig
(
data_gen
=
partial
(
generate_input2
)),
},
},
outputs
=
[
"output_data"
])
outputs
=
[
"output_data"
],
)
yield
program_config
yield
program_config
def
sample_predictor_configs
(
def
sample_predictor_configs
(
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
def
generate_dynamic_shape
(
attrs
):
def
generate_dynamic_shape
(
attrs
):
self
.
dynamic_shape
.
min_input_shape
=
{
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
1
,
8
,
8
,
8
],
"input_data"
:
[
1
,
8
,
8
,
8
],
"index_data"
:
[
2
]
"index_data"
:
[
2
]
,
}
}
self
.
dynamic_shape
.
max_input_shape
=
{
self
.
dynamic_shape
.
max_input_shape
=
{
"input_data"
:
[
4
,
32
,
64
,
64
],
"input_data"
:
[
4
,
32
,
64
,
64
],
"index_data"
:
[
2
]
"index_data"
:
[
2
]
,
}
}
self
.
dynamic_shape
.
opt_input_shape
=
{
self
.
dynamic_shape
.
opt_input_shape
=
{
"input_data"
:
[
2
,
32
,
64
,
64
],
"input_data"
:
[
2
,
32
,
64
,
64
],
"index_data"
:
[
2
]
"index_data"
:
[
2
]
,
}
}
def
clear_dynamic_shape
():
def
clear_dynamic_shape
():
...
@@ -188,25 +183,26 @@ class TrtConvertGatherNdTest_dim_4_1_2(TrtLayerAutoScanTest):
...
@@ -188,25 +183,26 @@ class TrtConvertGatherNdTest_dim_4_1_2(TrtLayerAutoScanTest):
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
(
0
,
4
),
1e-5
yield
self
.
create_inference_config
(),
(
0
,
4
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
(
0
,
4
),
1e-
5
yield
self
.
create_inference_config
(),
(
0
,
4
),
1e-
3
# for dynamic_shape
# for dynamic_shape
generate_dynamic_shape
(
attrs
)
generate_dynamic_shape
(
attrs
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
(
1
,
3
),
1e-5
yield
self
.
create_inference_config
(),
(
1
,
3
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
(
1
,
3
),
1e-
5
yield
self
.
create_inference_config
(),
(
1
,
3
),
1e-
3
def
add_skip_trt_case
(
self
):
def
add_skip_trt_case
(
self
):
def
teller1
(
program_config
,
predictor_config
):
def
teller1
(
program_config
,
predictor_config
):
if
len
(
self
.
dynamic_shape
.
min_input_shape
)
!=
0
and
os
.
name
==
'nt'
:
if
len
(
self
.
dynamic_shape
.
min_input_shape
)
!=
0
and
os
.
name
==
'nt'
:
return
True
return
True
return
False
return
False
self
.
add_skip_case
(
self
.
add_skip_case
(
teller1
,
SkipReasons
.
TRT_NOT_SUPPORT
,
teller1
,
"Under Windows Ci, this case will sporadically fail."
)
SkipReasons
.
TRT_NOT_SUPPORT
,
"Under Windows Ci, this case will sporadically fail."
,
)
def
test
(
self
):
def
test
(
self
):
self
.
add_skip_trt_case
()
self
.
add_skip_trt_case
()
...
@@ -214,29 +210,24 @@ class TrtConvertGatherNdTest_dim_4_1_2(TrtLayerAutoScanTest):
...
@@ -214,29 +210,24 @@ class TrtConvertGatherNdTest_dim_4_1_2(TrtLayerAutoScanTest):
class
TrtConvertGatherNdTest_dim_4_2
(
TrtLayerAutoScanTest
):
class
TrtConvertGatherNdTest_dim_4_2
(
TrtLayerAutoScanTest
):
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
return
True
return
True
def
sample_program_configs
(
self
):
def
sample_program_configs
(
self
):
def
generate_input1
():
def
generate_input1
():
return
np
.
random
.
random
([
2
,
32
,
64
,
64
]).
astype
(
np
.
float32
)
return
np
.
random
.
random
([
2
,
32
,
64
,
64
]).
astype
(
np
.
float32
)
def
generate_input2
():
def
generate_input2
():
return
np
.
ones
([
2
,
2
]).
astype
(
np
.
int32
)
return
np
.
ones
([
2
,
2
]).
astype
(
np
.
int32
)
ops_config
=
[{
ops_config
=
[
"op_type"
:
"gather_nd"
,
{
"op_inputs"
:
{
"op_type"
:
"gather_nd"
,
"X"
:
[
"input_data"
],
"op_inputs"
:
{
"X"
:
[
"input_data"
],
"Index"
:
[
"index_data"
]},
"Index"
:
[
"index_data"
]
"op_outputs"
:
{
"Out"
:
[
"output_data"
]},
},
"op_attrs"
:
{},
"op_outputs"
:
{
}
"Out"
:
[
"output_data"
]
]
},
"op_attrs"
:
{}
}]
ops
=
self
.
generate_op_config
(
ops_config
)
ops
=
self
.
generate_op_config
(
ops_config
)
program_config
=
ProgramConfig
(
program_config
=
ProgramConfig
(
...
@@ -246,25 +237,26 @@ class TrtConvertGatherNdTest_dim_4_2(TrtLayerAutoScanTest):
...
@@ -246,25 +237,26 @@ class TrtConvertGatherNdTest_dim_4_2(TrtLayerAutoScanTest):
"input_data"
:
TensorConfig
(
data_gen
=
partial
(
generate_input1
)),
"input_data"
:
TensorConfig
(
data_gen
=
partial
(
generate_input1
)),
"index_data"
:
TensorConfig
(
data_gen
=
partial
(
generate_input2
)),
"index_data"
:
TensorConfig
(
data_gen
=
partial
(
generate_input2
)),
},
},
outputs
=
[
"output_data"
])
outputs
=
[
"output_data"
],
)
yield
program_config
yield
program_config
def
sample_predictor_configs
(
def
sample_predictor_configs
(
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
def
generate_dynamic_shape
(
attrs
):
def
generate_dynamic_shape
(
attrs
):
self
.
dynamic_shape
.
min_input_shape
=
{
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
1
,
8
,
8
,
8
],
"input_data"
:
[
1
,
8
,
8
,
8
],
"index_data"
:
[
2
,
2
]
"index_data"
:
[
2
,
2
]
,
}
}
self
.
dynamic_shape
.
max_input_shape
=
{
self
.
dynamic_shape
.
max_input_shape
=
{
"input_data"
:
[
4
,
32
,
64
,
64
],
"input_data"
:
[
4
,
32
,
64
,
64
],
"index_data"
:
[
2
,
2
]
"index_data"
:
[
2
,
2
]
,
}
}
self
.
dynamic_shape
.
opt_input_shape
=
{
self
.
dynamic_shape
.
opt_input_shape
=
{
"input_data"
:
[
2
,
32
,
64
,
64
],
"input_data"
:
[
2
,
32
,
64
,
64
],
"index_data"
:
[
2
,
2
]
"index_data"
:
[
2
,
2
]
,
}
}
def
clear_dynamic_shape
():
def
clear_dynamic_shape
():
...
@@ -281,25 +273,26 @@ class TrtConvertGatherNdTest_dim_4_2(TrtLayerAutoScanTest):
...
@@ -281,25 +273,26 @@ class TrtConvertGatherNdTest_dim_4_2(TrtLayerAutoScanTest):
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
(
0
,
4
),
1e-5
yield
self
.
create_inference_config
(),
(
0
,
4
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
(
0
,
4
),
1e-
5
yield
self
.
create_inference_config
(),
(
0
,
4
),
1e-
3
# for dynamic_shape
# for dynamic_shape
generate_dynamic_shape
(
attrs
)
generate_dynamic_shape
(
attrs
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
(
1
,
3
),
1e-5
yield
self
.
create_inference_config
(),
(
1
,
3
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
(
1
,
3
),
1e-
5
yield
self
.
create_inference_config
(),
(
1
,
3
),
1e-
3
def
add_skip_trt_case
(
self
):
def
add_skip_trt_case
(
self
):
def
teller1
(
program_config
,
predictor_config
):
def
teller1
(
program_config
,
predictor_config
):
if
len
(
self
.
dynamic_shape
.
min_input_shape
)
!=
0
and
os
.
name
==
'nt'
:
if
len
(
self
.
dynamic_shape
.
min_input_shape
)
!=
0
and
os
.
name
==
'nt'
:
return
True
return
True
return
False
return
False
self
.
add_skip_case
(
self
.
add_skip_case
(
teller1
,
SkipReasons
.
TRT_NOT_SUPPORT
,
teller1
,
"Under Windows Ci, this case will sporadically fail."
)
SkipReasons
.
TRT_NOT_SUPPORT
,
"Under Windows Ci, this case will sporadically fail."
,
)
def
test
(
self
):
def
test
(
self
):
self
.
add_skip_trt_case
()
self
.
add_skip_trt_case
()
...
@@ -307,29 +300,24 @@ class TrtConvertGatherNdTest_dim_4_2(TrtLayerAutoScanTest):
...
@@ -307,29 +300,24 @@ class TrtConvertGatherNdTest_dim_4_2(TrtLayerAutoScanTest):
class
TrtConvertGatherNdTest_dim_4_3
(
TrtLayerAutoScanTest
):
class
TrtConvertGatherNdTest_dim_4_3
(
TrtLayerAutoScanTest
):
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
return
True
return
True
def
sample_program_configs
(
self
):
def
sample_program_configs
(
self
):
def
generate_input1
():
def
generate_input1
():
return
np
.
random
.
random
([
2
,
32
,
64
,
64
]).
astype
(
np
.
float32
)
return
np
.
random
.
random
([
2
,
32
,
64
,
64
]).
astype
(
np
.
float32
)
def
generate_input2
():
def
generate_input2
():
return
np
.
ones
([
2
,
2
,
4
]).
astype
(
np
.
int32
)
return
np
.
ones
([
2
,
2
,
4
]).
astype
(
np
.
int32
)
ops_config
=
[{
ops_config
=
[
"op_type"
:
"gather_nd"
,
{
"op_inputs"
:
{
"op_type"
:
"gather_nd"
,
"X"
:
[
"input_data"
],
"op_inputs"
:
{
"X"
:
[
"input_data"
],
"Index"
:
[
"index_data"
]},
"Index"
:
[
"index_data"
]
"op_outputs"
:
{
"Out"
:
[
"output_data"
]},
},
"op_attrs"
:
{},
"op_outputs"
:
{
}
"Out"
:
[
"output_data"
]
]
},
"op_attrs"
:
{}
}]
ops
=
self
.
generate_op_config
(
ops_config
)
ops
=
self
.
generate_op_config
(
ops_config
)
program_config
=
ProgramConfig
(
program_config
=
ProgramConfig
(
...
@@ -339,25 +327,26 @@ class TrtConvertGatherNdTest_dim_4_3(TrtLayerAutoScanTest):
...
@@ -339,25 +327,26 @@ class TrtConvertGatherNdTest_dim_4_3(TrtLayerAutoScanTest):
"input_data"
:
TensorConfig
(
data_gen
=
partial
(
generate_input1
)),
"input_data"
:
TensorConfig
(
data_gen
=
partial
(
generate_input1
)),
"index_data"
:
TensorConfig
(
data_gen
=
partial
(
generate_input2
)),
"index_data"
:
TensorConfig
(
data_gen
=
partial
(
generate_input2
)),
},
},
outputs
=
[
"output_data"
])
outputs
=
[
"output_data"
],
)
yield
program_config
yield
program_config
def
sample_predictor_configs
(
def
sample_predictor_configs
(
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
def
generate_dynamic_shape
(
attrs
):
def
generate_dynamic_shape
(
attrs
):
self
.
dynamic_shape
.
min_input_shape
=
{
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
1
,
8
,
8
,
8
],
"input_data"
:
[
1
,
8
,
8
,
8
],
"index_data"
:
[
2
,
2
,
4
]
"index_data"
:
[
2
,
2
,
4
]
,
}
}
self
.
dynamic_shape
.
max_input_shape
=
{
self
.
dynamic_shape
.
max_input_shape
=
{
"input_data"
:
[
4
,
32
,
64
,
64
],
"input_data"
:
[
4
,
32
,
64
,
64
],
"index_data"
:
[
2
,
2
,
4
]
"index_data"
:
[
2
,
2
,
4
]
,
}
}
self
.
dynamic_shape
.
opt_input_shape
=
{
self
.
dynamic_shape
.
opt_input_shape
=
{
"input_data"
:
[
2
,
32
,
64
,
64
],
"input_data"
:
[
2
,
32
,
64
,
64
],
"index_data"
:
[
2
,
2
,
4
]
"index_data"
:
[
2
,
2
,
4
]
,
}
}
def
clear_dynamic_shape
():
def
clear_dynamic_shape
():
...
@@ -374,25 +363,26 @@ class TrtConvertGatherNdTest_dim_4_3(TrtLayerAutoScanTest):
...
@@ -374,25 +363,26 @@ class TrtConvertGatherNdTest_dim_4_3(TrtLayerAutoScanTest):
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
(
0
,
4
),
1e-5
yield
self
.
create_inference_config
(),
(
0
,
4
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
(
0
,
4
),
1e-
5
yield
self
.
create_inference_config
(),
(
0
,
4
),
1e-
3
# for dynamic_shape
# for dynamic_shape
generate_dynamic_shape
(
attrs
)
generate_dynamic_shape
(
attrs
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
(
1
,
3
),
1e-5
yield
self
.
create_inference_config
(),
(
1
,
3
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
(
1
,
3
),
1e-
5
yield
self
.
create_inference_config
(),
(
1
,
3
),
1e-
3
def
add_skip_trt_case
(
self
):
def
add_skip_trt_case
(
self
):
def
teller1
(
program_config
,
predictor_config
):
def
teller1
(
program_config
,
predictor_config
):
if
len
(
self
.
dynamic_shape
.
min_input_shape
)
!=
0
and
os
.
name
==
'nt'
:
if
len
(
self
.
dynamic_shape
.
min_input_shape
)
!=
0
and
os
.
name
==
'nt'
:
return
True
return
True
return
False
return
False
self
.
add_skip_case
(
self
.
add_skip_case
(
teller1
,
SkipReasons
.
TRT_NOT_SUPPORT
,
teller1
,
"Under Windows Ci, this case will sporadically fail."
)
SkipReasons
.
TRT_NOT_SUPPORT
,
"Under Windows Ci, this case will sporadically fail."
,
)
def
test
(
self
):
def
test
(
self
):
self
.
add_skip_trt_case
()
self
.
add_skip_trt_case
()
...
@@ -400,29 +390,24 @@ class TrtConvertGatherNdTest_dim_4_3(TrtLayerAutoScanTest):
...
@@ -400,29 +390,24 @@ class TrtConvertGatherNdTest_dim_4_3(TrtLayerAutoScanTest):
class
TrtConvertGatherNdTest_dim_2_2
(
TrtLayerAutoScanTest
):
class
TrtConvertGatherNdTest_dim_2_2
(
TrtLayerAutoScanTest
):
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
return
True
return
True
def
sample_program_configs
(
self
):
def
sample_program_configs
(
self
):
def
generate_input1
():
def
generate_input1
():
return
np
.
random
.
random
([
2
,
32
]).
astype
(
np
.
float32
)
return
np
.
random
.
random
([
2
,
32
]).
astype
(
np
.
float32
)
def
generate_input2
():
def
generate_input2
():
return
np
.
array
([[
0
,
3
],
[
1
,
9
]]).
astype
(
np
.
int32
)
return
np
.
array
([[
0
,
3
],
[
1
,
9
]]).
astype
(
np
.
int32
)
ops_config
=
[{
ops_config
=
[
"op_type"
:
"gather_nd"
,
{
"op_inputs"
:
{
"op_type"
:
"gather_nd"
,
"X"
:
[
"input_data"
],
"op_inputs"
:
{
"X"
:
[
"input_data"
],
"Index"
:
[
"index_data"
]},
"Index"
:
[
"index_data"
]
"op_outputs"
:
{
"Out"
:
[
"output_data"
]},
},
"op_attrs"
:
{},
"op_outputs"
:
{
}
"Out"
:
[
"output_data"
]
]
},
"op_attrs"
:
{}
}]
ops
=
self
.
generate_op_config
(
ops_config
)
ops
=
self
.
generate_op_config
(
ops_config
)
program_config
=
ProgramConfig
(
program_config
=
ProgramConfig
(
...
@@ -432,25 +417,26 @@ class TrtConvertGatherNdTest_dim_2_2(TrtLayerAutoScanTest):
...
@@ -432,25 +417,26 @@ class TrtConvertGatherNdTest_dim_2_2(TrtLayerAutoScanTest):
"input_data"
:
TensorConfig
(
data_gen
=
partial
(
generate_input1
)),
"input_data"
:
TensorConfig
(
data_gen
=
partial
(
generate_input1
)),
"index_data"
:
TensorConfig
(
data_gen
=
partial
(
generate_input2
)),
"index_data"
:
TensorConfig
(
data_gen
=
partial
(
generate_input2
)),
},
},
outputs
=
[
"output_data"
])
outputs
=
[
"output_data"
],
)
yield
program_config
yield
program_config
def
sample_predictor_configs
(
def
sample_predictor_configs
(
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
def
generate_dynamic_shape
(
attrs
):
def
generate_dynamic_shape
(
attrs
):
self
.
dynamic_shape
.
min_input_shape
=
{
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
1
,
4
],
"input_data"
:
[
1
,
4
],
"index_data"
:
[
2
,
2
]
"index_data"
:
[
2
,
2
]
,
}
}
self
.
dynamic_shape
.
max_input_shape
=
{
self
.
dynamic_shape
.
max_input_shape
=
{
"input_data"
:
[
4
,
64
],
"input_data"
:
[
4
,
64
],
"index_data"
:
[
2
,
2
]
"index_data"
:
[
2
,
2
]
,
}
}
self
.
dynamic_shape
.
opt_input_shape
=
{
self
.
dynamic_shape
.
opt_input_shape
=
{
"input_data"
:
[
2
,
8
],
"input_data"
:
[
2
,
8
],
"index_data"
:
[
2
,
2
]
"index_data"
:
[
2
,
2
]
,
}
}
def
clear_dynamic_shape
():
def
clear_dynamic_shape
():
...
@@ -467,25 +453,26 @@ class TrtConvertGatherNdTest_dim_2_2(TrtLayerAutoScanTest):
...
@@ -467,25 +453,26 @@ class TrtConvertGatherNdTest_dim_2_2(TrtLayerAutoScanTest):
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
(
0
,
4
),
1e-5
yield
self
.
create_inference_config
(),
(
0
,
4
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
(
0
,
4
),
1e-
5
yield
self
.
create_inference_config
(),
(
0
,
4
),
1e-
3
# for dynamic_shape
# for dynamic_shape
generate_dynamic_shape
(
attrs
)
generate_dynamic_shape
(
attrs
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
(
1
,
3
),
1e-5
yield
self
.
create_inference_config
(),
(
1
,
3
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
(
1
,
3
),
1e-
5
yield
self
.
create_inference_config
(),
(
1
,
3
),
1e-
3
def
add_skip_trt_case
(
self
):
def
add_skip_trt_case
(
self
):
def
teller1
(
program_config
,
predictor_config
):
def
teller1
(
program_config
,
predictor_config
):
if
len
(
self
.
dynamic_shape
.
min_input_shape
)
!=
0
and
os
.
name
==
'nt'
:
if
len
(
self
.
dynamic_shape
.
min_input_shape
)
!=
0
and
os
.
name
==
'nt'
:
return
True
return
True
return
False
return
False
self
.
add_skip_case
(
self
.
add_skip_case
(
teller1
,
SkipReasons
.
TRT_NOT_SUPPORT
,
teller1
,
"Under Windows Ci, this case will sporadically fail."
)
SkipReasons
.
TRT_NOT_SUPPORT
,
"Under Windows Ci, this case will sporadically fail."
,
)
def
test
(
self
):
def
test
(
self
):
self
.
add_skip_trt_case
()
self
.
add_skip_trt_case
()
...
@@ -493,30 +480,26 @@ class TrtConvertGatherNdTest_dim_2_2(TrtLayerAutoScanTest):
...
@@ -493,30 +480,26 @@ class TrtConvertGatherNdTest_dim_2_2(TrtLayerAutoScanTest):
class
TrtConvertGatherNdTest_dim_3_3
(
TrtLayerAutoScanTest
):
class
TrtConvertGatherNdTest_dim_3_3
(
TrtLayerAutoScanTest
):
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
return
True
return
True
def
sample_program_configs
(
self
):
def
sample_program_configs
(
self
):
def
generate_input1
():
def
generate_input1
():
return
np
.
random
.
random
([
16
,
32
,
256
]).
astype
(
np
.
float32
)
return
np
.
random
.
random
([
16
,
32
,
256
]).
astype
(
np
.
float32
)
def
generate_input2
():
def
generate_input2
():
return
np
.
array
([[[
2
,
5
],
[
3
,
8
]],
[[
0
,
2
],
[
0
,
return
np
.
array
([[[
2
,
5
],
[
3
,
8
]],
[[
0
,
2
],
[
0
,
3
]]]).
astype
(
3
]]]).
astype
(
np
.
int32
)
np
.
int32
)
ops_config
=
[{
"op_type"
:
"gather_nd"
,
ops_config
=
[
"op_inputs"
:
{
{
"X"
:
[
"input_data"
],
"op_type"
:
"gather_nd"
,
"Index"
:
[
"index_data"
]
"op_inputs"
:
{
"X"
:
[
"input_data"
],
"Index"
:
[
"index_data"
]},
},
"op_outputs"
:
{
"Out"
:
[
"output_data"
]},
"op_outputs"
:
{
"op_attrs"
:
{},
"Out"
:
[
"output_data"
]
}
},
]
"op_attrs"
:
{}
}]
ops
=
self
.
generate_op_config
(
ops_config
)
ops
=
self
.
generate_op_config
(
ops_config
)
program_config
=
ProgramConfig
(
program_config
=
ProgramConfig
(
...
@@ -526,25 +509,26 @@ class TrtConvertGatherNdTest_dim_3_3(TrtLayerAutoScanTest):
...
@@ -526,25 +509,26 @@ class TrtConvertGatherNdTest_dim_3_3(TrtLayerAutoScanTest):
"input_data"
:
TensorConfig
(
data_gen
=
partial
(
generate_input1
)),
"input_data"
:
TensorConfig
(
data_gen
=
partial
(
generate_input1
)),
"index_data"
:
TensorConfig
(
data_gen
=
partial
(
generate_input2
)),
"index_data"
:
TensorConfig
(
data_gen
=
partial
(
generate_input2
)),
},
},
outputs
=
[
"output_data"
])
outputs
=
[
"output_data"
],
)
yield
program_config
yield
program_config
def
sample_predictor_configs
(
def
sample_predictor_configs
(
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
def
generate_dynamic_shape
(
attrs
):
def
generate_dynamic_shape
(
attrs
):
self
.
dynamic_shape
.
min_input_shape
=
{
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
1
,
4
,
4
],
"input_data"
:
[
1
,
4
,
4
],
"index_data"
:
[
1
,
1
,
1
]
"index_data"
:
[
1
,
1
,
1
]
,
}
}
self
.
dynamic_shape
.
max_input_shape
=
{
self
.
dynamic_shape
.
max_input_shape
=
{
"input_data"
:
[
16
,
64
,
512
],
"input_data"
:
[
16
,
64
,
512
],
"index_data"
:
[
4
,
2
,
4
]
"index_data"
:
[
4
,
2
,
4
]
,
}
}
self
.
dynamic_shape
.
opt_input_shape
=
{
self
.
dynamic_shape
.
opt_input_shape
=
{
"input_data"
:
[
2
,
8
,
64
],
"input_data"
:
[
2
,
8
,
64
],
"index_data"
:
[
2
,
2
,
2
]
"index_data"
:
[
2
,
2
,
2
]
,
}
}
def
clear_dynamic_shape
():
def
clear_dynamic_shape
():
...
@@ -561,14 +545,14 @@ class TrtConvertGatherNdTest_dim_3_3(TrtLayerAutoScanTest):
...
@@ -561,14 +545,14 @@ class TrtConvertGatherNdTest_dim_3_3(TrtLayerAutoScanTest):
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
(
0
,
4
),
1e-5
yield
self
.
create_inference_config
(),
(
0
,
4
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
(
0
,
4
),
1e-
5
yield
self
.
create_inference_config
(),
(
0
,
4
),
1e-
3
# for dynamic_shape
# for dynamic_shape
generate_dynamic_shape
(
attrs
)
generate_dynamic_shape
(
attrs
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
(
1
,
3
),
1e-5
yield
self
.
create_inference_config
(),
(
1
,
3
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
(
1
,
3
),
1e-
5
yield
self
.
create_inference_config
(),
(
1
,
3
),
1e-
3
def
test
(
self
):
def
test
(
self
):
self
.
run_test
()
self
.
run_test
()
...
...
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gelu.py
View file @
dbe08e9b
...
@@ -22,12 +22,10 @@ import unittest
...
@@ -22,12 +22,10 @@ import unittest
class
TrtConvertGeluTest
(
TrtLayerAutoScanTest
):
class
TrtConvertGeluTest
(
TrtLayerAutoScanTest
):
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
return
True
return
True
def
sample_program_configs
(
self
):
def
sample_program_configs
(
self
):
def
generate_input1
(
dims
,
attrs
:
List
[
Dict
[
str
,
Any
]]):
def
generate_input1
(
dims
,
attrs
:
List
[
Dict
[
str
,
Any
]]):
if
dims
==
1
:
if
dims
==
1
:
return
np
.
ones
([
32
]).
astype
(
np
.
float32
)
return
np
.
ones
([
32
]).
astype
(
np
.
float32
)
...
@@ -43,33 +41,32 @@ class TrtConvertGeluTest(TrtLayerAutoScanTest):
...
@@ -43,33 +41,32 @@ class TrtConvertGeluTest(TrtLayerAutoScanTest):
self
.
dims
=
dims
self
.
dims
=
dims
dics
=
[{
"approximate"
:
approximate
}]
dics
=
[{
"approximate"
:
approximate
}]
ops_config
=
[{
ops_config
=
[
"op_type"
:
"gelu"
,
{
"op_inputs"
:
{
"op_type"
:
"gelu"
,
"X"
:
[
"input_data"
]
"op_inputs"
:
{
"X"
:
[
"input_data"
]},
},
"op_outputs"
:
{
"Out"
:
[
"output_data"
]},
"op_outputs"
:
{
"op_attrs"
:
dics
[
0
],
"Out"
:
[
"output_data"
]
}
},
]
"op_attrs"
:
dics
[
0
]
}]
ops
=
self
.
generate_op_config
(
ops_config
)
ops
=
self
.
generate_op_config
(
ops_config
)
program_config
=
ProgramConfig
(
program_config
=
ProgramConfig
(
ops
=
ops
,
ops
=
ops
,
weights
=
{},
weights
=
{},
inputs
=
{
inputs
=
{
"input_data"
:
"input_data"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
generate_input1
,
dims
,
dics
)
data_gen
=
partial
(
generate_input1
,
dims
,
dics
)
)
)
},
},
outputs
=
[
"output_data"
])
outputs
=
[
"output_data"
],
)
yield
program_config
yield
program_config
def
sample_predictor_configs
(
def
sample_predictor_configs
(
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
def
generate_dynamic_shape
(
attrs
):
def
generate_dynamic_shape
(
attrs
):
if
self
.
dims
==
1
:
if
self
.
dims
==
1
:
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
1
]}
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
1
]}
...
@@ -123,19 +120,23 @@ class TrtConvertGeluTest(TrtLayerAutoScanTest):
...
@@ -123,19 +120,23 @@ class TrtConvertGeluTest(TrtLayerAutoScanTest):
clear_dynamic_shape
()
clear_dynamic_shape
()
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
1e-5
attrs
,
False
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
1e-5
attrs
,
False
),
1e-3
# for dynamic_shape
# for dynamic_shape
generate_dynamic_shape
(
attrs
)
generate_dynamic_shape
(
attrs
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
1e-5
attrs
,
True
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
1e-5
attrs
,
True
),
1e-3
def
test
(
self
):
def
test
(
self
):
self
.
run_test
()
self
.
run_test
()
...
...
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_grid_sampler.py
View file @
dbe08e9b
...
@@ -22,29 +22,27 @@ import unittest
...
@@ -22,29 +22,27 @@ import unittest
class
TrtConvertGridSampler
(
TrtLayerAutoScanTest
):
class
TrtConvertGridSampler
(
TrtLayerAutoScanTest
):
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
return
True
return
True
def
sample_program_configs
(
self
):
def
sample_program_configs
(
self
):
def
generate_input1
():
def
generate_input1
():
return
np
.
random
.
random
([
1
,
3
,
32
,
32
]).
astype
(
np
.
float32
)
return
np
.
random
.
random
([
1
,
3
,
32
,
32
]).
astype
(
np
.
float32
)
def
generate_input2
():
def
generate_input2
():
return
np
.
random
.
random
([
1
,
3
,
3
,
2
]).
astype
(
np
.
float32
)
return
np
.
random
.
random
([
1
,
3
,
3
,
2
]).
astype
(
np
.
float32
)
ops_config
=
[
{
ops_config
=
[
"op_type"
:
"grid_sampler"
,
{
"op_inputs"
:
{
"op_type"
:
"grid_sampler"
,
"
X"
:
[
"input_data"
],
"
op_inputs"
:
{
"Grid"
:
[
"grid
_data"
],
"X"
:
[
"input
_data"
],
}
,
"Grid"
:
[
"grid_data"
]
,
"op_outputs"
:
{
},
"Output"
:
[
"output_data"
]
"op_outputs"
:
{
"Output"
:
[
"output_data"
]
},
},
"op_attrs"
:
{
},
"op_attrs"
:
{
}
}
}
]
]
ops
=
self
.
generate_op_config
(
ops_config
)
ops
=
self
.
generate_op_config
(
ops_config
)
for
i
in
range
(
10
):
for
i
in
range
(
10
):
...
@@ -52,30 +50,33 @@ class TrtConvertGridSampler(TrtLayerAutoScanTest):
...
@@ -52,30 +50,33 @@ class TrtConvertGridSampler(TrtLayerAutoScanTest):
ops
=
ops
,
ops
=
ops
,
weights
=
{},
weights
=
{},
inputs
=
{
inputs
=
{
"input_data"
:
"input_data"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
generate_input1
)),
data_gen
=
partial
(
generate_input1
)
"grid_data"
:
),
TensorConfig
(
data_gen
=
partial
(
generate_input2
)),
"grid_data"
:
TensorConfig
(
data_gen
=
partial
(
generate_input2
)
),
},
},
outputs
=
[
"output_data"
])
outputs
=
[
"output_data"
],
)
yield
program_config
yield
program_config
def
sample_predictor_configs
(
def
sample_predictor_configs
(
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
def
generate_dynamic_shape
(
attrs
):
def
generate_dynamic_shape
(
attrs
):
self
.
dynamic_shape
.
min_input_shape
=
{
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
1
,
3
,
32
,
32
],
"input_data"
:
[
1
,
3
,
32
,
32
],
"grid_data"
:
[
1
,
3
,
3
,
2
]
"grid_data"
:
[
1
,
3
,
3
,
2
]
,
}
}
self
.
dynamic_shape
.
max_input_shape
=
{
self
.
dynamic_shape
.
max_input_shape
=
{
"input_data"
:
[
1
,
3
,
64
,
64
],
"input_data"
:
[
1
,
3
,
64
,
64
],
"grid_data"
:
[
1
,
3
,
4
,
4
]
"grid_data"
:
[
1
,
3
,
4
,
4
]
,
}
}
self
.
dynamic_shape
.
opt_input_shape
=
{
self
.
dynamic_shape
.
opt_input_shape
=
{
"input_data"
:
[
1
,
3
,
32
,
32
],
"input_data"
:
[
1
,
3
,
32
,
32
],
"grid_data"
:
[
1
,
3
,
3
,
2
]
"grid_data"
:
[
1
,
3
,
3
,
2
]
,
}
}
def
clear_dynamic_shape
():
def
clear_dynamic_shape
():
...
@@ -92,14 +93,14 @@ class TrtConvertGridSampler(TrtLayerAutoScanTest):
...
@@ -92,14 +93,14 @@ class TrtConvertGridSampler(TrtLayerAutoScanTest):
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
(
0
,
4
),
1e-5
yield
self
.
create_inference_config
(),
(
0
,
4
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
(
0
,
4
),
1e-
5
yield
self
.
create_inference_config
(),
(
0
,
4
),
1e-
3
# for dynamic_shape
# for dynamic_shape
generate_dynamic_shape
(
attrs
)
generate_dynamic_shape
(
attrs
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
(
1
,
3
),
1e-5
yield
self
.
create_inference_config
(),
(
1
,
3
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
(
1
,
3
),
1e-
5
yield
self
.
create_inference_config
(),
(
1
,
3
),
1e-
3
def
test
(
self
):
def
test
(
self
):
self
.
run_test
()
self
.
run_test
()
...
...
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_group_norm.py
View file @
dbe08e9b
...
@@ -22,7 +22,6 @@ import unittest
...
@@ -22,7 +22,6 @@ import unittest
class
TrtConvertGroupNormTest
(
TrtLayerAutoScanTest
):
class
TrtConvertGroupNormTest
(
TrtLayerAutoScanTest
):
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
inputs
=
program_config
.
inputs
inputs
=
program_config
.
inputs
weights
=
program_config
.
weights
weights
=
program_config
.
weights
...
@@ -36,7 +35,6 @@ class TrtConvertGroupNormTest(TrtLayerAutoScanTest):
...
@@ -36,7 +35,6 @@ class TrtConvertGroupNormTest(TrtLayerAutoScanTest):
return
True
return
True
def
sample_program_configs
(
self
):
def
sample_program_configs
(
self
):
def
generate_input
(
attrs
:
List
[
Dict
[
str
,
Any
]],
batch
):
def
generate_input
(
attrs
:
List
[
Dict
[
str
,
Any
]],
batch
):
if
attrs
[
0
][
'data_layout'
]
==
'NCHW'
:
if
attrs
[
0
][
'data_layout'
]
==
'NCHW'
:
return
np
.
random
.
random
([
batch
,
32
,
64
,
64
]).
astype
(
np
.
float32
)
return
np
.
random
.
random
([
batch
,
32
,
64
,
64
]).
astype
(
np
.
float32
)
...
@@ -53,47 +51,56 @@ class TrtConvertGroupNormTest(TrtLayerAutoScanTest):
...
@@ -53,47 +51,56 @@ class TrtConvertGroupNormTest(TrtLayerAutoScanTest):
for
group
in
[
1
,
4
,
32
,
-
1
]:
for
group
in
[
1
,
4
,
32
,
-
1
]:
for
epsilon
in
[
0.0001
,
0.0007
,
-
1
,
1
]:
for
epsilon
in
[
0.0001
,
0.0007
,
-
1
,
1
]:
for
data_layout
in
[
'NCHW'
]:
for
data_layout
in
[
'NCHW'
]:
dics
=
[{
dics
=
[
"epsilon"
:
epsilon
,
{
"groups"
:
group
,
"epsilon"
:
epsilon
,
"data_layout"
:
data_layout
"groups"
:
group
,
}]
"data_layout"
:
data_layout
,
ops_config
=
[{
}
"op_type"
:
"group_norm"
,
]
"op_inputs"
:
{
ops_config
=
[
"X"
:
[
"input_data"
],
{
"Scale"
:
[
"scale_weight"
],
"op_type"
:
"group_norm"
,
"Bias"
:
[
"bias_weight"
]
"op_inputs"
:
{
},
"X"
:
[
"input_data"
],
"op_outputs"
:
{
"Scale"
:
[
"scale_weight"
],
"Y"
:
[
"y_output"
],
"Bias"
:
[
"bias_weight"
],
"Mean"
:
[
"mean_output"
],
},
"Variance"
:
[
"variance_output"
]
"op_outputs"
:
{
},
"Y"
:
[
"y_output"
],
"op_attrs"
:
dics
[
0
]
"Mean"
:
[
"mean_output"
],
}]
"Variance"
:
[
"variance_output"
],
},
"op_attrs"
:
dics
[
0
],
}
]
ops
=
self
.
generate_op_config
(
ops_config
)
ops
=
self
.
generate_op_config
(
ops_config
)
program_config
=
ProgramConfig
(
program_config
=
ProgramConfig
(
ops
=
ops
,
ops
=
ops
,
weights
=
{
weights
=
{
"scale_weight"
:
"scale_weight"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
generate_scale
)),
data_gen
=
partial
(
generate_scale
)
"bias_weight"
:
),
TensorConfig
(
data_gen
=
partial
(
generate_bias
))
"bias_weight"
:
TensorConfig
(
data_gen
=
partial
(
generate_bias
)
),
},
},
inputs
=
{
inputs
=
{
"input_data"
:
"input_data"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
data_gen
=
partial
(
generate_input
,
dics
,
batch
))
generate_input
,
dics
,
batch
)
)
},
},
outputs
=
[
"y_output"
])
outputs
=
[
"y_output"
],
)
yield
program_config
yield
program_config
def
sample_predictor_configs
(
def
sample_predictor_configs
(
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
def
generate_dynamic_shape
(
attrs
):
def
generate_dynamic_shape
(
attrs
):
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
1
,
16
,
16
,
16
]}
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
1
,
16
,
16
,
16
]}
self
.
dynamic_shape
.
max_input_shape
=
{
self
.
dynamic_shape
.
max_input_shape
=
{
...
@@ -117,19 +124,23 @@ class TrtConvertGroupNormTest(TrtLayerAutoScanTest):
...
@@ -117,19 +124,23 @@ class TrtConvertGroupNormTest(TrtLayerAutoScanTest):
clear_dynamic_shape
()
clear_dynamic_shape
()
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
1e-5
attrs
,
False
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
1e-5
attrs
,
False
),
(
1e-3
,
1e-3
)
# for dynamic_shape
# for dynamic_shape
generate_dynamic_shape
(
attrs
)
generate_dynamic_shape
(
attrs
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
1e-5
attrs
,
True
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
1e-5
attrs
,
True
),
(
1e-3
,
1e-3
)
def
add_skip_trt_case
(
self
):
def
add_skip_trt_case
(
self
):
pass
pass
...
...
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_hard_sigmoid.py
View file @
dbe08e9b
...
@@ -22,12 +22,10 @@ import unittest
...
@@ -22,12 +22,10 @@ import unittest
class
TrtConvertHardSigmoidTest_dim_2
(
TrtLayerAutoScanTest
):
class
TrtConvertHardSigmoidTest_dim_2
(
TrtLayerAutoScanTest
):
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
return
True
return
True
def
sample_program_configs
(
self
):
def
sample_program_configs
(
self
):
def
generate_input
(
shape
):
def
generate_input
(
shape
):
return
np
.
random
.
random
(
shape
).
astype
(
np
.
float32
)
return
np
.
random
.
random
(
shape
).
astype
(
np
.
float32
)
...
@@ -37,33 +35,34 @@ class TrtConvertHardSigmoidTest_dim_2(TrtLayerAutoScanTest):
...
@@ -37,33 +35,34 @@ class TrtConvertHardSigmoidTest_dim_2(TrtLayerAutoScanTest):
for
slope
in
[
0.1
,
0.5
]:
for
slope
in
[
0.1
,
0.5
]:
for
offset
in
[
0.2
,
0.7
]:
for
offset
in
[
0.2
,
0.7
]:
dics
=
[{
"slope"
:
slope
,
"offset"
:
offset
}]
dics
=
[{
"slope"
:
slope
,
"offset"
:
offset
}]
ops_config
=
[
{
ops_config
=
[
"op_type"
:
"hard_sigmoid"
,
{
"op_inputs"
:
{
"op_type"
:
"hard_sigmoid"
,
"
X"
:
[
"input_data"
],
"
op_inputs"
:
{
}
,
"X"
:
[
"input_data"
]
,
"op_outputs"
:
{
},
"Out"
:
[
"output_data"
]
"op_outputs"
:
{
"Out"
:
[
"output_data"
]
},
}
,
"op_attrs"
:
dics
[
0
]
,
"op_attrs"
:
dics
[
0
]
}
}
]
]
ops
=
self
.
generate_op_config
(
ops_config
)
ops
=
self
.
generate_op_config
(
ops_config
)
program_config
=
ProgramConfig
(
program_config
=
ProgramConfig
(
ops
=
ops
,
ops
=
ops
,
weights
=
{},
weights
=
{},
inputs
=
{
inputs
=
{
"input_data"
:
"input_data"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
generate_input
,
shape
)
data_gen
=
partial
(
generate_input
,
shape
)
)
)
},
},
outputs
=
[
"output_data"
])
outputs
=
[
"output_data"
],
)
yield
program_config
yield
program_config
def
sample_predictor_configs
(
def
sample_predictor_configs
(
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
def
generate_dynamic_shape
(
attrs
):
def
generate_dynamic_shape
(
attrs
):
if
self
.
input_dim
==
2
:
if
self
.
input_dim
==
2
:
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
1
,
8
]}
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
1
,
8
]}
...
@@ -98,14 +97,14 @@ class TrtConvertHardSigmoidTest_dim_2(TrtLayerAutoScanTest):
...
@@ -98,14 +97,14 @@ class TrtConvertHardSigmoidTest_dim_2(TrtLayerAutoScanTest):
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
(
1
,
2
),
1e-5
yield
self
.
create_inference_config
(),
(
1
,
2
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
(
1
,
2
),
1e-
5
yield
self
.
create_inference_config
(),
(
1
,
2
),
1e-
3
# for dynamic_shape
# for dynamic_shape
generate_dynamic_shape
(
attrs
)
generate_dynamic_shape
(
attrs
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
(
1
,
2
),
1e-5
yield
self
.
create_inference_config
(),
(
1
,
2
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
(
1
,
2
),
1e-
5
yield
self
.
create_inference_config
(),
(
1
,
2
),
1e-
3
def
test
(
self
):
def
test
(
self
):
self
.
run_test
()
self
.
run_test
()
...
...
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_hard_swish.py
View file @
dbe08e9b
...
@@ -22,7 +22,6 @@ import unittest
...
@@ -22,7 +22,6 @@ import unittest
class
TrtConvertHardSwishTest
(
TrtLayerAutoScanTest
):
class
TrtConvertHardSwishTest
(
TrtLayerAutoScanTest
):
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
inputs
=
program_config
.
inputs
inputs
=
program_config
.
inputs
weights
=
program_config
.
weights
weights
=
program_config
.
weights
...
@@ -36,46 +35,46 @@ class TrtConvertHardSwishTest(TrtLayerAutoScanTest):
...
@@ -36,46 +35,46 @@ class TrtConvertHardSwishTest(TrtLayerAutoScanTest):
return
True
return
True
def
sample_program_configs
(
self
):
def
sample_program_configs
(
self
):
def
generate_input1
(
attrs
:
List
[
Dict
[
str
,
Any
]]):
def
generate_input1
(
attrs
:
List
[
Dict
[
str
,
Any
]]):
return
np
.
ones
([
1
,
3
,
32
,
32
]).
astype
(
np
.
float32
)
return
np
.
ones
([
1
,
3
,
32
,
32
]).
astype
(
np
.
float32
)
for
threshold
in
[
6.0
,
7.0
,
100.0
,
0.0
,
-
1.0
]:
for
threshold
in
[
6.0
,
7.0
,
100.0
,
0.0
,
-
1.0
]:
for
scale
in
[
5.0
,
7.0
,
-
1.0
,
0.0
,
100.0
]:
for
scale
in
[
5.0
,
7.0
,
-
1.0
,
0.0
,
100.0
]:
for
offset
in
[
3.0
,
5.0
,
-
1.0
,
0.0
,
100.0
]:
for
offset
in
[
3.0
,
5.0
,
-
1.0
,
0.0
,
100.0
]:
dics
=
[
{
dics
=
[
"threshold"
:
threshold
,
{
"scale"
:
scale
,
"threshold"
:
threshold
,
"offset"
:
offset
"scale"
:
scale
,
}]
"offset"
:
offset
,
}
ops_config
=
[{
]
"op_type"
:
"hard_swish"
,
"op_inputs"
:
{
ops_config
=
[
"X"
:
[
"input_data"
]
{
}
,
"op_type"
:
"hard_swish"
,
"op_
out
puts"
:
{
"op_
in
puts"
:
{
"X"
:
[
"input_data"
]},
"Out"
:
[
"hard_swish_output_data"
]
"op_outputs"
:
{
"Out"
:
[
"hard_swish_output_data"
]
},
}
,
"op_attrs"
:
dics
[
0
]
,
"op_attrs"
:
dics
[
0
]
}
}
]
]
ops
=
self
.
generate_op_config
(
ops_config
)
ops
=
self
.
generate_op_config
(
ops_config
)
program_config
=
ProgramConfig
(
program_config
=
ProgramConfig
(
ops
=
ops
,
ops
=
ops
,
weights
=
{},
weights
=
{},
inputs
=
{
inputs
=
{
"input_data"
:
"input_data"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
generate_input1
,
dics
)
data_gen
=
partial
(
generate_input1
,
dics
)
)
)
},
},
outputs
=
[
"hard_swish_output_data"
])
outputs
=
[
"hard_swish_output_data"
],
)
yield
program_config
yield
program_config
def
sample_predictor_configs
(
def
sample_predictor_configs
(
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
def
generate_dynamic_shape
(
attrs
):
def
generate_dynamic_shape
(
attrs
):
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
1
,
3
,
16
,
16
]}
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
1
,
3
,
16
,
16
]}
self
.
dynamic_shape
.
max_input_shape
=
{
"input_data"
:
[
2
,
3
,
32
,
32
]}
self
.
dynamic_shape
.
max_input_shape
=
{
"input_data"
:
[
2
,
3
,
32
,
32
]}
...
@@ -97,19 +96,23 @@ class TrtConvertHardSwishTest(TrtLayerAutoScanTest):
...
@@ -97,19 +96,23 @@ class TrtConvertHardSwishTest(TrtLayerAutoScanTest):
clear_dynamic_shape
()
clear_dynamic_shape
()
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
1e-5
attrs
,
False
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
(
1e-5
,
1e-5
)
attrs
,
False
),
(
1e-3
,
1e-3
)
# for dynamic_shape
# for dynamic_shape
generate_dynamic_shape
(
attrs
)
generate_dynamic_shape
(
attrs
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
1e-5
attrs
,
True
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
(
1e-5
,
1e-5
)
attrs
,
True
),
(
1e-3
,
1e-3
)
def
test
(
self
):
def
test
(
self
):
self
.
run_test
()
self
.
run_test
()
...
...
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_inverse.py
View file @
dbe08e9b
...
@@ -22,41 +22,41 @@ import unittest
...
@@ -22,41 +22,41 @@ import unittest
class
TrtConvertInverse
(
TrtLayerAutoScanTest
):
class
TrtConvertInverse
(
TrtLayerAutoScanTest
):
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
return
True
return
True
def
sample_program_configs
(
self
):
def
sample_program_configs
(
self
):
def
generate_input1
():
def
generate_input1
():
return
np
.
random
.
random
([
32
,
32
]).
astype
(
np
.
float32
)
return
np
.
random
.
random
([
32
,
32
]).
astype
(
np
.
float32
)
ops_config
=
[
{
ops_config
=
[
"op_type"
:
"inverse"
,
{
"op_inputs"
:
{
"op_type"
:
"inverse"
,
"
Input"
:
[
"input_data"
],
"
op_inputs"
:
{
}
,
"Input"
:
[
"input_data"
]
,
"op_outputs"
:
{
},
"Output"
:
[
"output_data"
]
"op_outputs"
:
{
"Output"
:
[
"output_data"
]
},
},
"op_attrs"
:
{
},
"op_attrs"
:
{
}
}
}
]
]
ops
=
self
.
generate_op_config
(
ops_config
)
ops
=
self
.
generate_op_config
(
ops_config
)
for
i
in
range
(
10
):
for
i
in
range
(
10
):
program_config
=
ProgramConfig
(
program_config
=
ProgramConfig
(
ops
=
ops
,
ops
=
ops
,
weights
=
{},
weights
=
{},
inputs
=
{
inputs
=
{
"input_data"
:
"input_data"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
generate_input1
)),
data_gen
=
partial
(
generate_input1
)
),
},
},
outputs
=
[
"output_data"
])
outputs
=
[
"output_data"
],
)
yield
program_config
yield
program_config
def
sample_predictor_configs
(
def
sample_predictor_configs
(
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
def
generate_dynamic_shape
(
attrs
):
def
generate_dynamic_shape
(
attrs
):
self
.
dynamic_shape
.
min_input_shape
=
{
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
1
,
1
],
"input_data"
:
[
1
,
1
],
...
@@ -82,14 +82,14 @@ class TrtConvertInverse(TrtLayerAutoScanTest):
...
@@ -82,14 +82,14 @@ class TrtConvertInverse(TrtLayerAutoScanTest):
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
(
0
,
3
),
1e-5
yield
self
.
create_inference_config
(),
(
0
,
3
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
(
0
,
3
),
1e-
5
yield
self
.
create_inference_config
(),
(
0
,
3
),
1e-
3
# for dynamic_shape
# for dynamic_shape
generate_dynamic_shape
(
attrs
)
generate_dynamic_shape
(
attrs
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
(
1
,
2
),
1e-5
yield
self
.
create_inference_config
(),
(
1
,
2
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
(
1
,
2
),
1e-
5
yield
self
.
create_inference_config
(),
(
1
,
2
),
1e-
3
def
test
(
self
):
def
test
(
self
):
self
.
run_test
()
self
.
run_test
()
...
...
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_leaky_relu.py
View file @
dbe08e9b
...
@@ -23,12 +23,10 @@ import unittest
...
@@ -23,12 +23,10 @@ import unittest
class
TrtConvertLeakyReluTest
(
TrtLayerAutoScanTest
):
class
TrtConvertLeakyReluTest
(
TrtLayerAutoScanTest
):
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
def
is_program_valid
(
self
,
program_config
:
ProgramConfig
)
->
bool
:
return
True
return
True
def
sample_program_configs
(
self
):
def
sample_program_configs
(
self
):
def
generate_input1
(
shape
):
def
generate_input1
(
shape
):
return
np
.
random
.
random
(
shape
).
astype
(
np
.
float32
)
return
np
.
random
.
random
(
shape
).
astype
(
np
.
float32
)
...
@@ -37,32 +35,35 @@ class TrtConvertLeakyReluTest(TrtLayerAutoScanTest):
...
@@ -37,32 +35,35 @@ class TrtConvertLeakyReluTest(TrtLayerAutoScanTest):
self
.
input_dim
=
len
(
shape
)
self
.
input_dim
=
len
(
shape
)
for
alpha
in
[
0.02
,
1.0
,
100.0
,
-
1.0
,
0.0
]:
for
alpha
in
[
0.02
,
1.0
,
100.0
,
-
1.0
,
0.0
]:
dics
=
[{
"alpha"
:
alpha
}]
dics
=
[{
"alpha"
:
alpha
}]
ops_config
=
[{
ops_config
=
[
"op_type"
:
"leaky_relu"
,
{
"op_inputs"
:
{
"op_type"
:
"leaky_relu"
,
"X"
:
[
"input_data"
],
"op_inputs"
:
{
},
"X"
:
[
"input_data"
],
"op_outputs"
:
{
},
"Out"
:
[
"y_data"
],
"op_outputs"
:
{
},
"Out"
:
[
"y_data"
],
"op_attrs"
:
dics
[
0
]
},
}]
"op_attrs"
:
dics
[
0
],
}
]
ops
=
self
.
generate_op_config
(
ops_config
)
ops
=
self
.
generate_op_config
(
ops_config
)
program_config
=
ProgramConfig
(
program_config
=
ProgramConfig
(
ops
=
ops
,
ops
=
ops
,
weights
=
{},
weights
=
{},
inputs
=
{
inputs
=
{
"input_data"
:
"input_data"
:
TensorConfig
(
TensorConfig
(
data_gen
=
partial
(
generate_input1
,
shape
)
data_gen
=
partial
(
generate_input1
,
shape
)
)
)
},
},
outputs
=
[
"y_data"
])
outputs
=
[
"y_data"
],
)
yield
program_config
yield
program_config
def
sample_predictor_configs
(
def
sample_predictor_configs
(
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
self
,
program_config
)
->
(
paddle_infer
.
Config
,
List
[
int
],
float
):
def
generate_dynamic_shape
(
attrs
):
def
generate_dynamic_shape
(
attrs
):
if
self
.
input_dim
==
2
:
if
self
.
input_dim
==
2
:
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
1
,
8
]}
self
.
dynamic_shape
.
min_input_shape
=
{
"input_data"
:
[
1
,
8
]}
...
@@ -101,25 +102,31 @@ class TrtConvertLeakyReluTest(TrtLayerAutoScanTest):
...
@@ -101,25 +102,31 @@ class TrtConvertLeakyReluTest(TrtLayerAutoScanTest):
clear_dynamic_shape
()
clear_dynamic_shape
()
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
1e-5
attrs
,
False
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
(
1e-5
,
1e-5
)
attrs
,
False
),
(
1e-3
,
1e-3
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Int8
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Int8
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
False
),
(
1e-5
,
1e-5
)
attrs
,
False
),
(
1e-3
,
1e-3
)
# for dynamic_shape
# for dynamic_shape
generate_dynamic_shape
(
attrs
)
generate_dynamic_shape
(
attrs
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Float32
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
1e-5
attrs
,
True
),
1e-5
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Half
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
(
1e-5
,
1e-5
)
attrs
,
True
),
(
1e-3
,
1e-3
)
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Int8
self
.
trt_param
.
precision
=
paddle_infer
.
PrecisionType
.
Int8
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
yield
self
.
create_inference_config
(),
generate_trt_nodes_num
(
attrs
,
True
),
(
1e-5
,
1e-5
)
attrs
,
True
),
(
1e-3
,
1e-3
)
def test(self):
    # Entry point invoked by the unittest runner; delegates to the
    # auto-scan base class, which iterates every program/predictor
    # configuration yielded by the sampling methods of this test class.
    self.run_test()
...
...
Prev
1
…
7
8
9
10
11
12
13
14
15
16
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment