OpenDAS / bitsandbytes
"git@developer.sourcefind.cn:OpenDAS/vision.git" did not exist on "cde7ff014dceda46f2b60183f3f3856bdb94f7c0"
Commit ee5b947e, authored Aug 23, 2022 by Tim Dettmers

Fixed issue where Pascal was not displaying proper error.

parent 7e0fb655
Showing 2 changed files with 7 additions and 22 deletions:

bitsandbytes/functional.py  (+2, -21)
csrc/ops.cu                 (+5, -1)
bitsandbytes/functional.py
@@ -185,14 +185,9 @@ def create_dynamic_map(signed=True, n=7):
 def get_special_format_str():
+    if not torch.cuda.is_available(): return 'col_turing'
     major, minor = torch.cuda.get_device_capability()
-    if major < 7:
-        print(
-            f"Device with CUDA capability of {major} not supported for 8-bit matmul. Device has no tensor cores!"
-        )
-        assert major >= 7
-
-    if major == 7:
+    if major <= 7:
         return "col_turing"
     elif major == 8:
         return "col_ampere"
@@ -1685,20 +1680,6 @@ def double_quant(
     return out_row, out_col, row_stats, col_stats, coo_tensor
 
-def get_special_format_str():
-    if not torch.cuda.is_available(): return 'col_turning'
-    major, minor = torch.cuda.get_device_capability()
-    if major < 7:
-        print(
-            f"Device with CUDA capability of {major} not supported for 8-bit matmul. Device has no tensor cores!"
-        )
-        assert major >= 7
-
-    if major == 7: return 'col_turing'
-    elif major == 8: return 'col_ampere'
-    else: return 'col_turing'
-
 def transform(A, to_order, from_order='row', out=None, transpose=False, state=None, ld=None):
     prev_device = pre_call(A.device)
     if state is None: state = (A.shape, from_order)
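This second hunk is the heart of the fix: Python binds the most recent def, so this stale duplicate (note its 'col_turning' typo in the no-CUDA fallback) shadowed the corrected definition earlier in the module and was the copy actually executed at runtime. A minimal, hypothetical illustration of the shadowing, not taken from the repo:

def get_format():
    return "fixed"

def get_format():  # a later duplicate silently rebinds the name
    return "stale"

print(get_format())  # prints "stale", not "fixed"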
csrc/ops.cu
@@ -371,7 +371,11 @@ template void transform<int32_t, COL32, ROW, false, 32>(cublasLtHandle_t ltHandl
 template <int FORMATB, int DTYPE_OUT, int SCALE_ROWS> int igemmlt(cublasLtHandle_t ltHandle, int m, int n, int k, const int8_t *A, const int8_t *B, void *C, float *row_scale, int lda, int ldb, int ldc)
 {
 #ifdef NO_CUBLASLT
-   printf("ERROR: Your GPU does not support Int8 Matmul!");
+   cout << "" << endl;
+   cout << "=============================================" << endl;
+   cout << "ERROR: Your GPU does not support Int8 Matmul!" << endl;
+   cout << "=============================================" << endl;
+   cout << "" << endl;
    assert(false);
    return 0;
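The ops.cu change is likely about output visibility rather than control flow: assert(false) still aborts, but the old printf wrote a message with no trailing newline into a stdio buffer that is generally not flushed when the process aborts, so the explanation could vanish entirely; std::endl flushes after every line, so the framed banner reliably appears before the abort. An illustrative Python sketch of that buffering failure mode (run it to watch the message disappear):

import os
import sys

# Write the error with no newline and no flush, as the old printf(...) did.
sys.stdout.write("ERROR: Your GPU does not support Int8 Matmul!")

# Abort immediately, as a failed assert(false) does: the process dies before
# the buffer is flushed, so the message above is typically never shown.
os.abort()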