Commit 62c0bd22 authored by Tom Aarsen's avatar Tom Aarsen
Browse files

Fix several typos in logging and comments

Via codespell
parent d504050f
...@@ -116,7 +116,7 @@ try: ...@@ -116,7 +116,7 @@ try:
CUDASetup.get_instance().generate_instructions() CUDASetup.get_instance().generate_instructions()
CUDASetup.get_instance().print_log_stack() CUDASetup.get_instance().print_log_stack()
raise RuntimeError(''' raise RuntimeError('''
CUDA Setup failed despite GPU being available. Inspect the CUDA SETUP outputs aboveto fix your environment! CUDA Setup failed despite GPU being available. Inspect the CUDA SETUP outputs above to fix your environment!
If you cannot find any issues and suspect a bug, please open an issue with details about your environment: If you cannot find any issues and suspect a bug, please open an issue with details about your environment:
https://github.com/TimDettmers/bitsandbytes/issues''') https://github.com/TimDettmers/bitsandbytes/issues''')
lib.cadam32bit_g32 lib.cadam32bit_g32
......
...@@ -50,7 +50,7 @@ def get_cuda_version(cuda, cudart_path): ...@@ -50,7 +50,7 @@ def get_cuda_version(cuda, cudart_path):
minor = (version-(major*1000))//10 minor = (version-(major*1000))//10
if major < 11: if major < 11:
CUDASetup.get_instance().add_log_entry('CUDA SETUP: CUDA version lower than 11 are currenlty not supported for LLM.int8(). You will only be able to use 8-bit optimizers and quantization routines!!') CUDASetup.get_instance().add_log_entry('CUDA SETUP: CUDA version lower than 11 are currently not supported for LLM.int8(). You will only be able to use 8-bit optimizers and quantization routines!!')
return f'{major}{minor}' return f'{major}{minor}'
......
...@@ -56,9 +56,9 @@ class GlobalOptimManager: ...@@ -56,9 +56,9 @@ class GlobalOptimManager:
""" """
Overrides initial optimizer config for specific parameters. Overrides initial optimizer config for specific parameters.
The key-values of the optimizer config for the input parameters are overidden The key-values of the optimizer config for the input parameters are overridden
This can be both, optimizer parameters like "betas", or "lr" or it can be This can be both, optimizer parameters like "betas", or "lr" or it can be
8-bit specific paramters like "optim_bits", "percentile_clipping". 8-bit specific parameters like "optim_bits", "percentile_clipping".
Parameters Parameters
---------- ----------
...@@ -282,11 +282,11 @@ class Optimizer8bit(torch.optim.Optimizer): ...@@ -282,11 +282,11 @@ class Optimizer8bit(torch.optim.Optimizer):
return config return config
def init_state(self, group, p, gindex, pindex): def init_state(self, group, p, gindex, pindex):
raise NotImplementedError("init_state method needs to be overidden") raise NotImplementedError("init_state method needs to be overridden")
def update_step(self, group, p, gindex, pindex): def update_step(self, group, p, gindex, pindex):
raise NotImplementedError( raise NotImplementedError(
"The update_step method needs to be overidden" "The update_step method needs to be overridden"
) )
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment