Unverified Commit 706ec24d authored by Aarni Koskela, committed by GitHub

Ruff fixes (#984)
* Adjust Ruff configuration

  * do not always autofix
  * be less strict around tests and benchmarks
  * adjust the ignore list for now

* Ruff: autofix I and F401

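(I sorts imports into groups; F401 removes imports that are never used. A minimal sketch of what the autofix does; the modules here are illustrative, not taken from this commit:)

    # Before the autofix: ungrouped imports, one of them unused.
    import torch
    import os        # F401: `os` is never referenced below
    import pytest

    # After `ruff check --fix`: the unused import is dropped and the rest sorted.
    import pytest
    import torch
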
* Apply ruff autofixes

* Fix RUF013 complaint

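RUF013 flags implicit Optional: a None default on a parameter whose annotation does not allow None. A minimal sketch; the function name is invented:

    from typing import Optional

    # Before: the annotation says `str`, but the default is None (RUF013).
    def get_device(name: str = None): ...

    # After: spell the Optional out.
    def get_device(name: Optional[str] = None): ...
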
* Fix mutable default in replace_linear

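B006: a mutable default is created once at definition time and shared across every call. A sketch of the fix pattern; the signature is assumed, not copied from the real replace_linear:

    # Before: all calls share (and can mutate) the same list object.
    def replace_linear(model, skip_modules=["lm_head"]):
        return model

    # After: an immutable tuple default avoids the shared state.
    def replace_linear(model, skip_modules=("lm_head",)):
        return model
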
* Don't use bare except

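A bare `except:` also swallows SystemExit and KeyboardInterrupt. A sketch of the pattern; the surrounding code is invented, not the commit's actual hunk:

    import ctypes

    # Before: `except:` would hide even Ctrl-C.
    # After: name the exception that can actually occur here.
    try:
        lib = ctypes.CDLL("libbitsandbytes.so")  # path is illustrative
    except OSError:
        lib = None
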
* Wrap the bitsandbytes.__main__ entrypoint in a function; fix "sensible" typo

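Wrapping the module-level code in a function means importing bitsandbytes.__main__ no longer runs it as a side effect. A sketch of the pattern; the body is a stand-in for the real diagnostics:

    def main():
        # Previously this ran at import time; now it only runs
        # when the module is executed as a script.
        print("bitsandbytes debug info...")  # stand-in output

    if __name__ == "__main__":
        main()
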
* Fix ruff B008 (function call in default arguments)

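B008 flags function calls in default arguments: they are evaluated once, when the function is defined, not on each call. A sketch with an assumed signature:

    import torch

    # Before (flagged): def move(tensor, device=torch.device("cuda")): ...
    # After: default to None and construct the device in the body.
    def move(tensor, device=None):
        if device is None:
            device = torch.device("cuda")
        return tensor.to(device)
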
* Add ruff noqas where suitable

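A noqa comment suppresses one rule on one line instead of disabling it project-wide. A sketch; the rule code shown is just an example, not one from this commit:

    # S108 (hardcoded temp directory) is silenced only on this line.
    path = "/tmp/scratch"  # noqa: S108
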
* Fix RUF005 (use a splat instead of concatenation)

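RUF005 prefers unpacking into a literal over concatenating collections. A minimal sketch with invented names:

    shape = (16, 32)

    # Before: concatenation builds an intermediate list.
    dims = [1] + list(shape)

    # After: splat the iterable into the literal directly.
    dims = [1, *shape]
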
* Fix B018 (useless expression)

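B018 flags statements whose value is computed and discarded, which in tests usually means a forgotten assert. A sketch:

    import torch

    t = torch.zeros(4, 4)

    # Before (flagged): the bare comparison does nothing on its own.
    #     t.shape == (4, 4)

    # After: assert it, so the test actually checks something.
    assert t.shape == (4, 4)
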
* Add pre-commit configuration + GitHub Actions lint workflow

* Fix unused `e` in bitsandbytes/__main__.py

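The unused binding (F841) is dropped while the handler stays narrow. A sketch of the pattern, not the actual hunk:

    # Before (flagged): `except ImportError as e:` with `e` never used.
    # After: omit the name when the exception object is not needed.
    try:
        import triton  # noqa: F401
    except ImportError:
        triton = None
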
* Fix a merge-conflict resolution error

* Run pre-commit hook

---------
Co-authored-by: Titus <9048635+Titus-von-Koeller@users.noreply.github.com>
parent a8c9dfa6
-import os
 from contextlib import nullcontext
 from itertools import product
+import os
 from tempfile import TemporaryDirectory
 import pytest
@@ -11,7 +11,6 @@ from bitsandbytes import functional as F
from bitsandbytes.autograd import get_inverse_transform_indices, undo_layout
from bitsandbytes.nn.modules import Linear8bitLt
# contributed by Alex Borzunov, see:
# https://github.com/bigscience-workshop/petals/blob/main/tests/test_linear8bitlt.py
......
from itertools import product
import math
import einops
import pytest
import torch
from torch import nn
......
 import ctypes
+from itertools import product
 import os
+from os.path import join
 import shutil
 import time
 import uuid
-from itertools import product
-from os.path import join
-import pytest
 from lion_pytorch import Lion
+import pytest
 import torch
 import bitsandbytes as bnb
@@ -27,7 +25,7 @@ def assert_most_approx_close(a, b, rtol=1e-3, atol=1e-3, max_error_count=0):
 def get_temp_dir():
-    path = f"/tmp/autoswap/{str(uuid.uuid4())}"
+    path = f"/tmp/autoswap/{uuid.uuid4()}"
     os.makedirs(path, exist_ok=True)
     return path
......
 import pytest
 import torch
-from bitsandbytes.triton.triton_utils import is_triton_available
-from bitsandbytes.nn.triton_based_modules import SwitchBackLinear
 from bitsandbytes.nn import Linear8bitLt
+from bitsandbytes.nn.triton_based_modules import SwitchBackLinear
+from bitsandbytes.triton.triton_utils import is_triton_available
 @pytest.mark.skipif(not is_triton_available() or not torch.cuda.is_available() or not torch.cuda.get_device_capability()[0] >= 8,
                     reason="This test requires triton and a GPU with compute capability 8.0 or higher.")
......