OpenDAS / bitsandbytes · Commit 223fea51

Add no_cpu for optimizers

Authored Jul 14, 2025 by Egor Krivov
Parent: 3b89a05e
Showing 2 changed files with 5 additions and 5 deletions:

tests/helpers.py     +2 −2
tests/test_optim.py  +3 −3
tests/helpers.py

...
@@ -18,12 +18,12 @@ BOOLEAN_TUPLES = list(product(TRUE_FALSE, repeat=2))  # all combinations of (boo
 @functools.cache
-def get_available_devices():
+def get_available_devices(no_cpu=False):
     if "BNB_TEST_DEVICE" in os.environ:
         # If the environment variable is set, use it directly.
         return [os.environ["BNB_TEST_DEVICE"]]
-    devices = [] if HIP_ENVIRONMENT else ["cpu"]
+    devices = [] if HIP_ENVIRONMENT else ["cpu"] if not no_cpu else []
     if hasattr(torch, "accelerator"):
         # PyTorch 2.6+ - determine accelerator using agnostic API.
...
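One subtlety in the new helpers.py line: Python's conditional expression is right-associative, so the chained ternary groups as `[] if HIP_ENVIRONMENT else (["cpu"] if not no_cpu else [])`. A minimal standalone sketch of that grouping (the function name and parameters below are illustrative, not from the commit):

    # Standalone sketch; mirrors the grouping of the new helpers.py line.
    def base_devices(hip_environment: bool, no_cpu: bool) -> list:
        # Right-associative: [] if hip_environment else (["cpu"] if not no_cpu else [])
        return [] if hip_environment else ["cpu"] if not no_cpu else []

    assert base_devices(hip_environment=False, no_cpu=False) == ["cpu"]
    assert base_devices(hip_environment=False, no_cpu=True) == []
    assert base_devices(hip_environment=True, no_cpu=False) == []
    assert base_devices(hip_environment=True, no_cpu=True) == []

The hunk is truncated at the `torch.accelerator` branch, so the accelerator-detection body is not visible here. As an assumption about what such a branch typically looks like with the PyTorch 2.6+ device-agnostic API (not taken from this commit):

    import torch

    # Assumed shape of the detection; the real body is elided above.
    def detected_accelerators() -> list:
        if hasattr(torch, "accelerator"):
            # current_accelerator() returns a torch.device (e.g. type='cuda')
            # or None when no accelerator backend is present.
            acc = torch.accelerator.current_accelerator()
            if acc is not None:
                return [acc.type]
        return []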
tests/test_optim.py

...
@@ -169,7 +169,7 @@ optimizer_names_32bit = [
 @pytest.mark.parametrize("gtype", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)
 @pytest.mark.parametrize("dim1", [1024], ids=id_formatter("dim1"))
 @pytest.mark.parametrize("dim2", [32, 1024, 4097, 1], ids=id_formatter("dim2"))
-@pytest.mark.parametrize("device", get_available_devices(), ids=id_formatter("device"))
+@pytest.mark.parametrize("device", get_available_devices(no_cpu=True), ids=id_formatter("device"))
 def test_optimizer32bit(dim1, dim2, gtype, optim_name, device):
     if optim_name.startswith("paged_") and sys.platform == "win32":
         pytest.skip("Paged optimizers can have issues on Windows.")
...
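Note on this parametrization: the device list is built once at collection time, and with no_cpu=True it is empty on a machine with no accelerator. Pytest's default for an empty parameter set is to collect the test and mark it skipped (configurable via the empty_parameter_set_mark ini option). A standalone sketch, not from the commit:

    import pytest

    # Empty parameter set -> the test is collected but skipped by default.
    @pytest.mark.parametrize("device", [])
    def test_never_runs(device):
        raise AssertionError("never reached")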
@@ -249,7 +249,7 @@ def test_optimizer32bit(dim1, dim2, gtype, optim_name, device):
 @pytest.mark.parametrize("dim1", [1024], ids=id_formatter("dim1"))
 @pytest.mark.parametrize("dim2", [32, 1024, 4097], ids=id_formatter("dim2"))
 @pytest.mark.parametrize("gtype", [torch.float32, torch.float16], ids=describe_dtype)
-@pytest.mark.parametrize("device", get_available_devices())
+@pytest.mark.parametrize("device", get_available_devices(no_cpu=True))
 def test_global_config(dim1, dim2, gtype, device):
     if dim1 == 1 and dim2 == 1:
         return
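Background on what test_global_config exercises, summarized from the bitsandbytes README rather than from this diff: per-parameter optimizer overrides via GlobalOptimManager. A hedged sketch of that API (treat the details as assumptions, since the test body is not shown here):

    import torch
    import bitsandbytes as bnb

    # Per-parameter override flow as documented in the bitsandbytes README.
    model = torch.nn.Linear(1024, 1024)
    mng = bnb.optim.GlobalOptimManager.get_instance()
    mng.register_parameters(model.parameters())  # register before creating the optimizer
    optimizer = bnb.optim.Adam8bit(model.parameters(), lr=1e-3)

    # Keep one parameter's optimizer state in 32-bit while the rest use 8-bit.
    mng.override_config(model.weight, "optim_bits", 32)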
...
@@ -305,7 +305,7 @@ optimizer_names_8bit = [
 @pytest.mark.parametrize("gtype", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)
 @pytest.mark.parametrize("dim2", [32, 1024, 4097], ids=id_formatter("dim2"))
 @pytest.mark.parametrize("dim1", [1024], ids=id_formatter("dim1"))
-@pytest.mark.parametrize("device", get_available_devices())
+@pytest.mark.parametrize("device", get_available_devices(no_cpu=True))
 def test_optimizer8bit(dim1, dim2, gtype, optim_name, device):
     torch.set_printoptions(precision=6)
...
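Taken together, the three test_optim.py hunks make one mechanical change: every device parametrization switches from get_available_devices() to get_available_devices(no_cpu=True), so the 32-bit, global-config, and 8-bit optimizer tests no longer collect a CPU device. A self-contained sketch of the resulting pattern, with a simplified stand-in for the helper (the names and CUDA-only detection are illustrative):

    import pytest
    import torch

    def get_available_devices(no_cpu=False):
        # Simplified stand-in for tests/helpers.py: CPU unless excluded,
        # plus CUDA when it is available.
        devices = [] if no_cpu else ["cpu"]
        if torch.cuda.is_available():
            devices.append("cuda")
        return devices

    @pytest.mark.parametrize("device", get_available_devices(no_cpu=True))
    def test_add(device):
        x = torch.ones(4, device=device)
        assert torch.equal(x + x, torch.full((4,), 2.0, device=device))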