Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
bitsandbytes
Commits
51294d90
"git@developer.sourcefind.cn:OpenDAS/pytorch3d.git" did not exist on "3b2300641ae2e8ac8f8c8a3b5b3363fa204d6417"
Unverified
Commit
51294d90
authored
Sep 30, 2024
by
Matthew Douglas
Committed by
GitHub
Sep 30, 2024
Browse files
Fix optimizer support for Python <= 3.9 (#1379)
parent
776140a5
Changes
1
Hide whitespace changes
Inline
Side-by-side
Showing
1 changed file
with
2 additions
and
3 deletions
+2
-3
bitsandbytes/optim/optimizer.py
bitsandbytes/optim/optimizer.py
+2
-3
No files found.
bitsandbytes/optim/optimizer.py
View file @
51294d90
...
@@ -173,7 +173,7 @@ class Optimizer8bit(torch.optim.Optimizer):
             raise ValueError("loaded state dict has a different number of parameter groups")
         param_lens = (len(g["params"]) for g in groups)
         saved_lens = (len(g["params"]) for g in saved_groups)
-        if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens, strict=True)):
+        if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens)):
             raise ValueError(
                 "loaded state dict contains a parameter group that doesn't match the size of optimizer's group",
             )
...
@@ -184,7 +184,6 @@ class Optimizer8bit(torch.optim.Optimizer):
             for old_id, p in zip(
                 chain.from_iterable(g["params"] for g in saved_groups),
                 chain.from_iterable(g["params"] for g in groups),
-                strict=True,
             )
         }
...
@@ -226,7 +225,7 @@ class Optimizer8bit(torch.optim.Optimizer):
             new_group["params"] = group["params"]
             return new_group

-        param_groups = [update_group(g, ng) for g, ng in zip(groups, saved_groups, strict=True)]
+        param_groups = [update_group(g, ng) for g, ng in zip(groups, saved_groups)]
         self.__setstate__({"state": state, "param_groups": param_groups})

     def to_gpu(self):
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment