gaoqiong / MIGraphX · Commits

Commit 84242aae, authored Aug 16, 2018 by Paul
Parent: 95ee5ad4

    Add option to allocate on the host

Showing 4 changed files with 53 additions and 12 deletions (+53, -12):
    src/onnx/perf_onnx.cpp                        +2   -0
    src/targets/gpu/hip.cpp                       +25  -10
    src/targets/gpu/include/migraph/gpu/hip.hpp   +2   -2
    test/gpu/miopen.cpp                           +24  -0
src/onnx/perf_onnx.cpp

@@ -25,8 +25,10 @@ int main(int argc, char const* argv[])
 {
     std::string file = argv[1];
     auto p = migraph::parse_onnx(file);
+    std::cout << "Compiling ... " << std::endl;
     p.compile(migraph::gpu::target{});
     auto m = create_param_map(p);
+    std::cout << "Running performance report ... " << std::endl;
     p.perf_report(std::cout, 10, m);
 }
 }
src/targets/gpu/hip.cpp

@@ -13,12 +13,27 @@ using hip_ptr = MIGRAPH_MANAGE_PTR(void, hipFree);
 std::string hip_error(int error)
 {
     return hipGetErrorString(static_cast<hipError_t>(error));
 }
 
-hip_ptr allocate_gpu(std::size_t sz)
+std::size_t get_available_gpu_memory()
 {
-    void* result;
-    auto status = hipMalloc(&result, sz);
+    size_t free, total;
+    auto status = hipMemGetInfo(&free, &total);
     if(status != hipSuccess)
-        MIGRAPH_THROW("Gpu allocation failed: " + hip_error(status));
+        MIGRAPH_THROW("Failed getting available memory: " + hip_error(status));
+    return free;
+}
+
+hip_ptr allocate_gpu(std::size_t sz, bool host = false)
+{
+    if(sz > get_available_gpu_memory())
+        MIGRAPH_THROW("Memory not available to allocate buffer: " + std::to_string(sz));
+    void* result;
+    auto status = host ? hipHostMalloc(&result, sz) : hipMalloc(&result, sz);
+    if(status != hipSuccess)
+    {
+        if(host)
+            MIGRAPH_THROW("Gpu allocation failed: " + hip_error(status));
+        else
+            allocate_gpu(sz, true);
+    }
     return hip_ptr{result};
 }

@@ -40,24 +55,24 @@ std::vector<T> read_from_gpu(const void* x, std::size_t sz)
     return result;
 }
 
-hip_ptr write_to_gpu(const void* x, std::size_t sz)
+hip_ptr write_to_gpu(const void* x, std::size_t sz, bool host = false)
 {
-    auto result = allocate_gpu(sz);
+    auto result = allocate_gpu(sz, host);
     auto status = hipMemcpy(result.get(), x, sz, hipMemcpyHostToDevice);
     if(status != hipSuccess)
         MIGRAPH_THROW("Copy to gpu failed: " + hip_error(status));
     return result;
 }
 
-argument allocate_gpu(shape s)
+argument allocate_gpu(shape s, bool host)
 {
-    auto p = share(allocate_gpu(s.bytes() + 1));
+    auto p = share(allocate_gpu(s.bytes() + 1, host));
     return {s, [p]() mutable { return reinterpret_cast<char*>(p.get()); }};
 }
 
-argument to_gpu(argument arg)
+argument to_gpu(argument arg, bool host)
 {
-    auto p = share(write_to_gpu(arg.data(), arg.get_shape().bytes()));
+    auto p = share(write_to_gpu(arg.data(), arg.get_shape().bytes(), host));
     return {arg.get_shape(), [p]() mutable { return reinterpret_cast<char*>(p.get()); }};
 }
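The core of the change: allocate_gpu now checks free memory up front, and when a plain hipMalloc fails it retries the allocation with hipHostMalloc, i.e. in page-locked host memory. Below is a minimal standalone sketch of that fallback pattern, assuming only the HIP runtime rather than MIGraphX's hip_ptr/MIGRAPH_MANAGE_PTR machinery; unlike the diff above, the sketch returns the fallback allocation's pointer explicitly and frees pinned memory with hipHostFree.

    // Standalone sketch of a device-then-host allocation fallback (not the
    // committed MIGraphX code). Assumes only the HIP runtime.
    #include <hip/hip_runtime.h>
    #include <cstddef>
    #include <memory>
    #include <stdexcept>
    #include <string>

    // Deleter type so the same smart pointer can own device or pinned-host memory.
    using hip_deleter = hipError_t (*)(void*);
    using hip_buffer  = std::unique_ptr<void, hip_deleter>;

    hip_buffer allocate(std::size_t sz, bool host = false)
    {
        void* result = nullptr;
        // hipMalloc gives ordinary device memory; hipHostMalloc gives
        // page-locked (pinned) host memory.
        auto status = host ? hipHostMalloc(&result, sz) : hipMalloc(&result, sz);
        if(status != hipSuccess)
        {
            if(host)
                throw std::runtime_error("Allocation failed: " +
                                         std::string(hipGetErrorString(status)));
            // Device allocation failed: retry on the host and return that buffer.
            return allocate(sz, true);
        }
        return hip_buffer{result, host ? &hipHostFree : &hipFree};
    }

The deleter is chosen per allocation because memory obtained from hipHostMalloc is released with hipHostFree, while hipFree is for device allocations.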
src/targets/gpu/include/migraph/gpu/hip.hpp

@@ -6,9 +6,9 @@
 namespace migraph {
 namespace gpu {
 
-migraph::argument allocate_gpu(migraph::shape s);
-migraph::argument to_gpu(migraph::argument arg);
+migraph::argument allocate_gpu(migraph::shape s, bool host = false);
+migraph::argument to_gpu(migraph::argument arg, bool host = false);
 migraph::argument from_gpu(migraph::argument arg);
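A hedged usage sketch of the new overloads declared above: the default keeps the existing device-side behaviour, while passing host = true routes the allocation through hipHostMalloc. The include path mirrors this commit's header location; linking against the MIGraphX GPU target library is assumed, and the shape dimensions are borrowed from the new test below.

    // Hypothetical caller of the new host flag; not part of the commit.
    #include <migraph/gpu/hip.hpp>

    int main()
    {
        migraph::shape s{migraph::shape::float_type, {1, 256, 14, 14}};

        // Existing behaviour: buffer allocated with hipMalloc on the device.
        migraph::argument device_buf = migraph::gpu::allocate_gpu(s);

        // New option from this commit: host = true allocates with hipHostMalloc,
        // so the buffer lives in page-locked host memory instead.
        migraph::argument host_buf = migraph::gpu::allocate_gpu(s, true);

        return 0;
    }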
test/gpu/miopen.cpp

@@ -272,6 +272,29 @@ struct test_transpose
     }
 };
 
+struct test_batchnorm_inference_2
+{
+    const size_t width    = 14;
+    const size_t height   = 14;
+    const size_t channels = 256;
+    const size_t batches  = 1;
+
+    migraph::program create_program() const
+    {
+        migraph::program p;
+        migraph::shape s{migraph::shape::float_type, {batches, channels, height, width}};
+        migraph::shape vars{migraph::shape::float_type, {channels}};
+        auto x        = p.add_parameter("x", s);
+        auto mean     = p.add_parameter("mean", vars);
+        auto variance = p.add_parameter("variance", vars);
+        auto scale    = p.add_parameter("scale", vars);
+        auto bias     = p.add_parameter("bias", vars);
+        p.add_instruction(migraph::batch_norm_inference{}, x, mean, variance, scale, bias);
+        return p;
+    }
+};
+
 struct test_batchnorm_inference
 {
     const size_t width = 3;

@@ -309,4 +332,5 @@ int main()
     verify_program<test_contiguous>();
     verify_program<test_transpose>();
     verify_program<test_batchnorm_inference>();
+    verify_program<test_batchnorm_inference_2>();
 }
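The new test builds a 1x256x14x14 batch-norm-inference program; verify_program presumably compiles it for the GPU and checks the output against a reference. As a reminder of what the operator computes per channel, here is a small standalone reference sketch, y = scale * (x - mean) / sqrt(variance + eps) + bias; the epsilon value is an assumption, since the operator's default is not visible in this diff.

    // Standalone per-channel batch-norm inference reference (NCHW layout).
    // The eps default is an assumed value, not taken from MIGraphX.
    #include <cmath>
    #include <cstddef>
    #include <vector>

    std::vector<float> batchnorm_inference_ref(const std::vector<float>& x,
                                               const std::vector<float>& mean,
                                               const std::vector<float>& variance,
                                               const std::vector<float>& scale,
                                               const std::vector<float>& bias,
                                               std::size_t batches,
                                               std::size_t channels,
                                               std::size_t height,
                                               std::size_t width,
                                               float eps = 1e-5f)
    {
        std::vector<float> y(x.size());
        const std::size_t plane = height * width;
        for(std::size_t n = 0; n < batches; n++)
        {
            for(std::size_t c = 0; c < channels; c++)
            {
                const float inv_std = 1.0f / std::sqrt(variance[c] + eps);
                for(std::size_t i = 0; i < plane; i++)
                {
                    const std::size_t idx = (n * channels + c) * plane + i;
                    y[idx] = scale[c] * (x[idx] - mean[c]) * inv_std + bias[c];
                }
            }
        }
        return y;
    }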