OpenDAS / bitsandbytes · Commits

Commit 35ce337b
Authored Jul 11, 2025 by Egor Krivov
Fixed bugs
Parent: b43edf56
Showing 2 changed files with 4 additions and 3 deletions:

    bitsandbytes/backends/cuda/ops.py    +1 / -1
    bitsandbytes/optim/optimizer.py      +3 / -2
bitsandbytes/backends/cuda/ops.py  (view file @ 35ce337b)
@@ -579,7 +579,7 @@ def _optimizer_update_8bit_blockwise_impl(
     g: torch.Tensor,
     p: torch.Tensor,
     state1: torch.Tensor,
-    state2: Optional[torch.nsor],
+    state2: Optional[torch.Tensor],
     beta1: float,
     beta2: float,
     beta3: float,
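Reading the word-level highlighting in this hunk, the single-line change appears to restore the `Tensor` type name in the `state2` annotation (the pre-fix line seems to have read `Optional[torch.nsor]`). As a purely illustrative aside, and assuming the module evaluates annotations eagerly (i.e. it does not use `from __future__ import annotations`), such a typo is not cosmetic: parameter annotations are evaluated when the `def` statement runs, so the misspelled attribute fails as soon as the module is imported. A minimal sketch:

import torch

# Illustration only, not the fork's code: a misspelled attribute inside a
# parameter annotation is looked up the moment the `def` executes.
try:
    def _broken(state2: torch.nsor):  # AttributeError raised right here
        return state2
except AttributeError as exc:
    print(f"annotation evaluated at definition time: {exc}")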
bitsandbytes/optim/optimizer.py  (view file @ 35ce337b)
@@ -280,6 +280,7 @@ class Optimizer8bit(torch.optim.Optimizer):
             self.initialized = True

         # if self.is_paged: self.page_mng.prefetch_all()
+        p = None
         for gindex, group in enumerate(self.param_groups):
             for pindex, p in enumerate(group["params"]):
                 if p.grad is None:

@@ -291,10 +292,10 @@ class Optimizer8bit(torch.optim.Optimizer):
                 self.prefetch_state(p)
                 self.update_step(group, p, gindex, pindex)
                 sync_gpu(p)
-        if self.is_paged:
+        if self.is_paged and p is not None:
             # all paged operations are asynchronous, we need
             # to sync to make sure all tensors are in the right state
-            sync_gpu(loss)
+            sync_gpu(p)

         return loss
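The optimizer.py change addresses two related issues visible in the diff: `loss` is `None` whenever `step()` is called without a closure, so the final `sync_gpu(loss)` had nothing sensible to synchronize on, and the loop variable `p` is only bound if at least one parameter group is non-empty. Pre-initializing `p = None` and guarding the paged branch with `p is not None` sidesteps both. A minimal sketch of the resulting control flow, with `sync_gpu` stubbed out as a hypothetical helper (the real one is defined elsewhere in this fork):

from typing import Iterable, Optional

import torch


def sync_gpu(t: torch.Tensor) -> None:
    # Hypothetical stand-in for the fork's helper: wait for queued work on
    # the device that owns `t` (paged optimizer ops run asynchronously).
    if t.is_cuda:
        torch.cuda.synchronize(t.device)


def step_like(param_groups: Iterable[dict], is_paged: bool,
              loss: Optional[torch.Tensor] = None):
    p = None  # mirrors the added line: stays None if the loops never run
    for group in param_groups:
        for p in group["params"]:
            if p.grad is None:
                continue
            # ... perform the 8-bit update on `p` here ...
            sync_gpu(p)
    # Sync on the last parameter rather than on `loss`, and skip the sync
    # entirely when no parameter was visited.
    if is_paged and p is not None:
        sync_gpu(p)
    return loss

Synchronizing on a parameter rather than on the returned loss presumably also matches what the diff's own comment asks for: it is the parameter and optimizer-state tensors, not the loss scalar, whose asynchronous paged work must complete.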