"git@developer.sourcefind.cn:zhaoyu6/sglang.git" did not exist on "85986bb97808105840704304f906b2d3757f82e9"
Unverified commit 6ca9c76a, authored by Philip Meier and committed by GitHub

Upgrade usort to `1.0.2` and black to 22.3.0 (#5106)



* upgrade usort to

* Also update black

* Actually use 1.0.2

* Apply pre-commit
Co-authored-by: Nicolas Hug <contact@nicolas-hug.com>
parent 9293be7e
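For orientation before the diff: almost every hunk below is import re-ordering from the usort upgrade, plus black 22.3.0 dropping the spaces around `**` between simple operands. A minimal sketch of the import style, assembled from the first hunk below (the description of usort's rules here is inferred from the diff itself, not quoted from its documentation):

# Old style (removed lines in the first hunk):
#   from typing import Tuple, Optional
#   from torchvision.transforms import functional_tensor as _FT, functional_pil as _FP
#
# Style after usort 1.0.2 (added lines): names are sorted within each statement;
# elsewhere in the diff, consecutive `from . import ...` statements are also
# merged into a single statement.
from typing import Optional, Tuple

from torchvision.transforms import functional_pil as _FP, functional_tensor as _FT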
-from typing import Tuple, Optional
+from typing import Optional, Tuple
import PIL.Image
import torch
from torchvision.prototype.features import BoundingBoxFormat, ColorSpace
-from torchvision.transforms import functional_tensor as _FT, functional_pil as _FP
+from torchvision.transforms import functional_pil as _FP, functional_tensor as _FT
get_dimensions_image_tensor = _FT.get_dimensions
get_dimensions_image_pil = _FP.get_dimensions
......
-from typing import Optional, List
+from typing import List, Optional
import PIL.Image
import torch
......
import unittest.mock
-from typing import Dict, Any, Tuple, Union
+from typing import Any, Dict, Tuple, Union
import numpy as np
import PIL.Image
......
@@ -3,18 +3,7 @@ import difflib
import io
import mmap
import platform
-from typing import (
-    Any,
-    BinaryIO,
-    Callable,
-    Collection,
-    Iterator,
-    Sequence,
-    Tuple,
-    TypeVar,
-    Union,
-    Optional,
-)
+from typing import Any, BinaryIO, Callable, Collection, Iterator, Optional, Sequence, Tuple, TypeVar, Union
import numpy as np
import torch
......
@@ -5,7 +5,7 @@ future versions without warning. The classes should be accessed only via the tra
from typing import Optional, Tuple
import torch
-from torch import Tensor, nn
+from torch import nn, Tensor
from . import functional as F, InterpolationMode
......
@@ -4,10 +4,7 @@ import numbers
import random
import warnings
-from torchvision.transforms import (
-    RandomCrop,
-    RandomResizedCrop,
-)
+from torchvision.transforms import RandomCrop, RandomResizedCrop
from . import _functional_video as F
......
import math
from enum import Enum
-from typing import List, Tuple, Optional, Dict
+from typing import Dict, List, Optional, Tuple
import torch
from torch import Tensor
......
@@ -2,7 +2,7 @@ import math
import numbers
import warnings
from enum import Enum
-from typing import List, Tuple, Any, Optional, Union
+from typing import Any, List, Optional, Tuple, Union
import numpy as np
import torch
@@ -15,8 +15,7 @@ except ImportError:
    accimage = None
from ..utils import _log_api_usage_once
-from . import functional_pil as F_pil
-from . import functional_tensor as F_t
+from . import functional_pil as F_pil, functional_tensor as F_t
class InterpolationMode(Enum):
......
import warnings
-from typing import Optional, Tuple, List, Union
+from typing import List, Optional, Tuple, Union
import torch
from torch import Tensor
-from torch.nn.functional import grid_sample, conv2d, interpolate, pad as torch_pad
+from torch.nn.functional import conv2d, grid_sample, interpolate, pad as torch_pad
def _is_tensor_a_torch_image(x: Tensor) -> bool:
@@ -247,7 +247,7 @@ def adjust_gamma(img: Tensor, gamma: float, gain: float = 1) -> Tensor:
    if not torch.is_floating_point(img):
        result = convert_image_dtype(result, torch.float32)
-    result = (gain * result ** gamma).clamp(0, 1)
+    result = (gain * result**gamma).clamp(0, 1)
    result = convert_image_dtype(result, dtype)
    return result
......
@@ -3,7 +3,7 @@ import numbers
import random
import warnings
from collections.abc import Sequence
-from typing import Tuple, List, Optional
+from typing import List, Optional, Tuple
import torch
from torch import Tensor
@@ -15,7 +15,7 @@ except ImportError:
from ..utils import _log_api_usage_once
from . import functional as F
-from .functional import InterpolationMode, _interpolation_modes_from_int
+from .functional import _interpolation_modes_from_int, InterpolationMode
__all__ = [
    "Compose",
......
@@ -449,7 +449,7 @@ def flow_to_image(flow: torch.Tensor) -> torch.Tensor:
    if flow.ndim != 4 or flow.shape[1] != 2:
        raise ValueError(f"Input flow should have shape (2, H, W) or (N, 2, H, W), got {orig_shape}.")
-    max_norm = torch.sum(flow ** 2, dim=1).sqrt().max()
+    max_norm = torch.sum(flow**2, dim=1).sqrt().max()
    epsilon = torch.finfo((flow).dtype).eps
    normalized_flow = flow / (max_norm + epsilon)
    img = _normalized_flow_to_image(normalized_flow)
@@ -476,7 +476,7 @@ def _normalized_flow_to_image(normalized_flow: torch.Tensor) -> torch.Tensor:
    flow_image = torch.zeros((N, 3, H, W), dtype=torch.uint8, device=device)
    colorwheel = _make_colorwheel().to(device)  # shape [55x3]
    num_cols = colorwheel.shape[0]
-    norm = torch.sum(normalized_flow ** 2, dim=1).sqrt()
+    norm = torch.sum(normalized_flow**2, dim=1).sqrt()
    a = torch.atan2(-normalized_flow[:, 1, :, :], -normalized_flow[:, 0, :, :]) / torch.pi
    fk = (a + 1) / 2 * (num_cols - 1)
    k0 = torch.floor(fk).to(torch.long)
@@ -542,7 +542,7 @@ def _make_colorwheel() -> torch.Tensor:
def _generate_color_palette(num_objects: int):
-    palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])
+    palette = torch.tensor([2**25 - 1, 2**15 - 1, 2**21 - 1])
    return [tuple((i * palette) % 255) for i in range(num_objects)]
......
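The `**` hunks above are black 22.3.0's power-operator formatting: the spaces around `**` are dropped when both operands are simple names or literals, as in `result**gamma` and `2**25 - 1`. A minimal sketch that reproduces this with black used as a library (assumes black >= 22.3.0 is installed; the input strings are lines taken from the old side of the diff):

import black

# Two lines exactly as they appeared before the upgrade (old side of the diff).
old = (
    "result = (gain * result ** gamma).clamp(0, 1)\n"
    "palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])\n"
)

# black 22.x removes spaces around ** when both operands are simple
# (names, numbers, attribute access), matching the changes in the hunks above.
print(black.format_str(old, mode=black.Mode()))
# result = (gain * result**gamma).clamp(0, 1)
# palette = torch.tensor([2**25 - 1, 2**15 - 1, 2**21 - 1])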