Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
dgl
Commits
5e64481b
"...pytorch/git@developer.sourcefind.cn:OpenDAS/dgl.git" did not exist on "72b3e078af32b9b237d024526f8f25fffe088c03"
Unverified
Commit
5e64481b
authored
Dec 03, 2023
by
Muhammed Fatih BALIN
Committed by
GitHub
Dec 03, 2023
Browse files
[Graphbolt][CUDA] Simplify allocator class by discarding tensors (#6654)
parent
2968c9b2
Changes
1
Hide whitespace changes
Inline
Side-by-side
Showing 1 changed file with 8 additions and 14 deletions (+8, -14).
graphbolt/src/cuda/common.h
graphbolt/src/cuda/common.h
+8
-14
No files found.
graphbolt/src/cuda/common.h
View file @
5e64481b
...
@@ -6,6 +6,7 @@
...
@@ -6,6 +6,7 @@
#ifndef GRAPHBOLT_CUDA_COMMON_H_
#define GRAPHBOLT_CUDA_COMMON_H_

#include <c10/cuda/CUDACachingAllocator.h>
#include <c10/cuda/CUDAException.h>
#include <cuda_runtime.h>
#include <torch/script.h>

#include <cstddef>  // std::ptrdiff_t used by allocate()

// NOTE(review): the scraped diff elides the lines between the includes and
// this struct (hunk header "@@ -34,29 +35,22 @@ namespace cuda"); the struct
// presumably lives inside a graphbolt::cuda namespace -- confirm against the
// repository before applying.

/**
 * @brief Workspace allocator for temporary GPU scratch memory.
 *
 * Satisfies the minimal allocator interface thrust requires (a `value_type`
 * alias plus `allocate`) and doubles as a deleter via `operator()(void*)`,
 * so an allocation can be held in a smart pointer that calls this object to
 * free it. Memory is obtained directly from PyTorch's CUDA caching
 * allocator (`raw_alloc` / `raw_delete`); no `torch::Tensor` objects are
 * kept alive to back the buffer.
 *
 * int_array.get() gives the raw pointer.
 */
struct CUDAWorkspaceAllocator {
  // Required by thrust to satisfy allocator requirements.
  using value_type = char;

  // raw_alloc() below needs an initialized CUDA context, so make sure the
  // context exists before the first allocation request.
  explicit CUDAWorkspaceAllocator() { at::globalContext().lazyInitCUDA(); }

  CUDAWorkspaceAllocator& operator=(const CUDAWorkspaceAllocator&) = default;

  // Deleter interface: return the buffer to the caching allocator.
  void operator()(void* ptr) const {
    c10::cuda::CUDACachingAllocator::raw_delete(ptr);
  }

  // Required by thrust to satisfy allocator requirements.
  value_type* allocate(std::ptrdiff_t size) const {
    return reinterpret_cast<value_type*>(
        c10::cuda::CUDACachingAllocator::raw_alloc(size));
  }

  // Required by thrust to satisfy allocator requirements.
  // (struct continues past the visible portion of this chunk)
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment