"torchvision/vscode:/vscode.git/clone" did not exist on "8aec85deb447d415abc0788d8dc301a727cc69fb"
Unverified Commit db969cc1 authored by Sai-Suraj-27's avatar Sai-Suraj-27 Committed by GitHub
Browse files

fix: Fixed `type annotations` for compatibility with python 3.8 (#7648)

* Fixed type annotations for compatibility with python 3.8

* Add required imports.
parent 3cfe187d
......@@ -151,7 +151,7 @@ def concat_first(feat: torch.Tensor, dim: int = 2, scale: float = 1.0) -> torch.
return torch.cat((feat, feat_style), dim=dim)
def calc_mean_std(feat: torch.Tensor, eps: float = 1e-5) -> Tuple[torch.Tensor, torch.Tensor]:
    """Compute the per-channel mean and standard deviation of *feat*.

    Both statistics are reduced over ``dim=-2`` with the reduced dimension
    kept, so the results broadcast against ``feat`` (as used by AdaIN-style
    normalization).

    Args:
        feat: Input feature tensor; statistics are taken over its
            second-to-last dimension. (Exact expected shape is not shown
            here — presumably (..., N, C); confirm against callers.)
        eps: Small constant added to the variance for numerical stability
            before the square root.

    Returns:
        Tuple ``(feat_mean, feat_std)``, each with the same shape as
        ``feat`` except the reduced dimension has size 1.
    """
    # `Tuple` (typing) rather than builtin `tuple[...]` keeps the
    # annotation valid on Python 3.8 (PEP 585 generics need 3.9+).
    feat_std = (feat.var(dim=-2, keepdims=True) + eps).sqrt()
    feat_mean = feat.mean(dim=-2, keepdims=True)
    return feat_mean, feat_std
......
......@@ -17,7 +17,7 @@
import inspect
from collections.abc import Callable
from typing import Any, List, Optional, Union
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import PIL
......@@ -1211,8 +1211,8 @@ class StableDiffusionXLControlNetAdapterInpaintPipeline(
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Optional[Union[str, list[str]]] = None,
prompt_2: Optional[Union[str, list[str]]] = None,
prompt: Optional[Union[str, List[str]]] = None,
prompt_2: Optional[Union[str, List[str]]] = None,
image: Optional[Union[torch.Tensor, PIL.Image.Image]] = None,
mask_image: Optional[Union[torch.Tensor, PIL.Image.Image]] = None,
adapter_image: PipelineImageInput = None,
......@@ -1224,11 +1224,11 @@ class StableDiffusionXLControlNetAdapterInpaintPipeline(
denoising_start: Optional[float] = None,
denoising_end: Optional[float] = None,
guidance_scale: float = 5.0,
negative_prompt: Optional[Union[str, list[str]]] = None,
negative_prompt_2: Optional[Union[str, list[str]]] = None,
negative_prompt: Optional[Union[str, List[str]]] = None,
negative_prompt_2: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, list[torch.Generator]]] = None,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[Union[torch.FloatTensor]] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
......@@ -1238,12 +1238,12 @@ class StableDiffusionXLControlNetAdapterInpaintPipeline(
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
cross_attention_kwargs: Optional[dict[str, Any]] = None,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
guidance_rescale: float = 0.0,
original_size: Optional[tuple[int, int]] = None,
crops_coords_top_left: Optional[tuple[int, int]] = (0, 0),
target_size: Optional[tuple[int, int]] = None,
adapter_conditioning_scale: Optional[Union[float, list[float]]] = 1.0,
original_size: Optional[Tuple[int, int]] = None,
crops_coords_top_left: Optional[Tuple[int, int]] = (0, 0),
target_size: Optional[Tuple[int, int]] = None,
adapter_conditioning_scale: Optional[Union[float, List[float]]] = 1.0,
cond_tau: float = 1.0,
aesthetic_score: float = 6.0,
negative_aesthetic_score: float = 2.5,
......
......@@ -637,7 +637,7 @@ def _filter2d(input, kernel):
height, width = tmp_kernel.shape[-2:]
padding_shape: list[int] = _compute_padding([height, width])
padding_shape: List[int] = _compute_padding([height, width])
input = torch.nn.functional.pad(input, padding_shape, mode="reflect")
# kernel and input tensor reshape to align element-wise or batch-wise params
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment