Commit c8c37dbd authored by 0x3f3f3f3fun

upload models to BaiduNetdisk

parent c98fd062
@@ -79,12 +79,12 @@ pip install -r requirements.txt
 
 ## <a name="pretrained_models"></a>:dna:Pretrained Models
 
-| Model Name | Description |
-| :--------- | :---------- |
-| [general_swinir_v1.ckpt](https://huggingface.co/lxq007/DiffBIR/resolve/main/general_swinir_v1.ckpt) | Stage1 model (SwinIR) for general image restoration. |
-| [general_full_v1.ckpt](https://huggingface.co/lxq007/DiffBIR/resolve/main/general_full_v1.ckpt) | Full model for general image restoration. "Full" means it contains both the stage1 and stage2 model. |
-| [face_swinir_v1.ckpt](https://huggingface.co/lxq007/DiffBIR/resolve/main/face_swinir_v1.ckpt) | Stage1 model (SwinIR) for face restoration. |
-| [face_full_v1.ckpt](https://huggingface.co/lxq007/DiffBIR/resolve/main/face_full_v1.ckpt) | Full model for face restoration. |
+| Model Name | Description | HuggingFace | BaiduNetdisk |
+| :--------- | :---------- | :---------- | :---------- |
+| general_swinir_v1.ckpt | Stage1 model (SwinIR) for general image restoration. | [download](https://huggingface.co/lxq007/DiffBIR/resolve/main/general_swinir_v1.ckpt) | [download](https://pan.baidu.com/s/1uvSvJgcoL_Knj0h22-9TvA?pwd=v3v6) (pwd: v3v6) |
+| general_full_v1.ckpt | Full model for general image restoration. "Full" means it contains both the stage1 and stage2 model. | [download](https://huggingface.co/lxq007/DiffBIR/resolve/main/general_full_v1.ckpt) | [download](https://pan.baidu.com/s/1gLvW1nvkJStdVAKROqaYaA?pwd=86zi) (pwd: 86zi) |
+| face_swinir_v1.ckpt | Stage1 model (SwinIR) for face restoration. | [download](https://huggingface.co/lxq007/DiffBIR/resolve/main/face_swinir_v1.ckpt) | [download](https://pan.baidu.com/s/1cnBBC8437BJiM3q6suaK8g?pwd=xk5u) (pwd: xk5u) |
+| face_full_v1.ckpt | Full model for face restoration. | [download](https://huggingface.co/lxq007/DiffBIR/resolve/main/face_full_v1.ckpt) | [download](https://pan.baidu.com/s/1pc04xvQybkynRfzK5Y8K0Q?pwd=ov8i) (pwd: ov8i) |
 
 ## <a name="quick_start"></a>:flight_departure:Quick Start
...
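For reference, below is a minimal, hypothetical sketch of fetching one of the checkpoints listed in the table with only the Python standard library. The HuggingFace URL comes from the table itself; the local `weights/` directory is an assumption, not something this repository prescribes.

```python
import os
import urllib.request

# Hypothetical download helper: the URL is taken from the pretrained-models
# table above; the "weights/" destination folder is only an assumption.
url = "https://huggingface.co/lxq007/DiffBIR/resolve/main/general_full_v1.ckpt"
os.makedirs("weights", exist_ok=True)
urllib.request.urlretrieve(url, os.path.join("weights", "general_full_v1.ckpt"))
print("saved to weights/general_full_v1.ckpt")
```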
@@ -38,7 +38,7 @@ def process(
         control_imgs (List[np.ndarray]): A list of low-quality images (HWC, RGB, range in [0, 255])
         sampler (str): Sampler name.
         steps (int): Sampling steps.
-        strength (float): Control strength. Set to 1.0 during traning.
+        strength (float): Control strength. Set to 1.0 during training.
         color_fix_type (str): Type of color correction for samples.
         disable_preprocess_model (bool): If specified, preprocess model (SwinIR) will not be used.
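To make the documented arguments concrete, here is a hedged sketch of a single call to `process`, mirroring how the script below invokes it. The loaded `model`, the input file name, and the `"ddpm"` / `"wavelet"` values are assumptions for illustration only, not guaranteed by this diff.

```python
import numpy as np
from PIL import Image

# Sketch only: `model` is assumed to be the already-loaded DiffBIR model and
# `process` the function documented above. The script below also pads inputs
# to a multiple of 64 before calling process; that step is omitted here.
lq = np.array(Image.open("low_quality.png").convert("RGB"))  # HWC, RGB, [0, 255]

preds, stage1_preds = process(
    model, [lq],
    steps=50,                   # sampling steps
    sampler="ddpm",             # assumed sampler name
    strength=1.0,               # control strength; 1.0 as during training
    color_fix_type="wavelet",   # assumed color-correction option
    disable_preprocess_model=False,
)
restored = preds[0]             # final restored image
stage1_out = stage1_preds[0]    # SwinIR (stage1) output
```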
@@ -150,63 +150,63 @@ def main() -> None:
     assert os.path.isdir(args.input)
     print(f"sampling {args.steps} steps using {args.sampler} sampler")
-    for file_path in list_image_files(args.input, follow_links=True):
-        lq = Image.open(file_path).convert("RGB")
-        if args.sr_scale != 1:
-            lq = lq.resize(
-                tuple(math.ceil(x * args.sr_scale) for x in lq.size),
-                Image.BICUBIC
-            )
-        lq_resized = auto_resize(lq, args.image_size)
-        x = pad(np.array(lq_resized), scale=64)
-        for i in range(args.repeat_times):
-            save_path = os.path.join(args.output, os.path.relpath(file_path, args.input))
-            parent_path, stem, _ = get_file_name_parts(save_path)
-            save_path = os.path.join(parent_path, f"{stem}_{i}.png")
-            if os.path.exists(save_path):
-                if args.skip_if_exist:
-                    print(f"skip {save_path}")
-                    continue
-                else:
-                    raise RuntimeError(f"{save_path} already exist")
-            os.makedirs(parent_path, exist_ok=True)
-            try:
-                preds, stage1_preds = process(
-                    model, [x], steps=args.steps, sampler=args.sampler,
-                    strength=1,
-                    color_fix_type=args.color_fix_type,
-                    disable_preprocess_model=args.disable_preprocess_model
-                )
-            except RuntimeError as e:
-                # Avoid cuda_out_of_memory error.
-                print(f"{file_path}, error: {e}")
-                continue
-            pred, stage1_pred = preds[0], stage1_preds[0]
-            # remove padding
-            pred = pred[:lq_resized.height, :lq_resized.width, :]
-            stage1_pred = stage1_pred[:lq_resized.height, :lq_resized.width, :]
-            if args.show_lq:
-                if args.resize_back:
-                    if lq_resized.size != lq.size:
-                        pred = np.array(Image.fromarray(pred).resize(lq.size, Image.LANCZOS))
-                        stage1_pred = np.array(Image.fromarray(stage1_pred).resize(lq.size, Image.LANCZOS))
-                    lq = np.array(lq)
-                else:
-                    lq = np.array(lq_resized)
-                images = [lq, pred] if args.disable_preprocess_model else [lq, stage1_pred, pred]
-                Image.fromarray(np.concatenate(images, axis=1)).save(save_path)
-            else:
-                if args.resize_back and lq_resized.size != lq.size:
-                    Image.fromarray(pred).resize(lq.size, Image.LANCZOS).save(save_path)
-                else:
-                    Image.fromarray(pred).save(save_path)
-            print(f"save to {save_path}")
+    with torch.autocast(device):
+        for file_path in list_image_files(args.input, follow_links=True):
+            lq = Image.open(file_path).convert("RGB")
+            if args.sr_scale != 1:
+                lq = lq.resize(
+                    tuple(math.ceil(x * args.sr_scale) for x in lq.size),
+                    Image.BICUBIC
+                )
+            lq_resized = auto_resize(lq, args.image_size)
+            x = pad(np.array(lq_resized), scale=64)
+            for i in range(args.repeat_times):
+                save_path = os.path.join(args.output, os.path.relpath(file_path, args.input))
+                parent_path, stem, _ = get_file_name_parts(save_path)
+                save_path = os.path.join(parent_path, f"{stem}_{i}.png")
+                if os.path.exists(save_path):
+                    if args.skip_if_exist:
+                        print(f"skip {save_path}")
+                        continue
+                    else:
+                        raise RuntimeError(f"{save_path} already exist")
+                os.makedirs(parent_path, exist_ok=True)
+                try:
+                    preds, stage1_preds = process(
+                        model, [x], steps=args.steps, sampler=args.sampler,
+                        strength=1,
+                        color_fix_type=args.color_fix_type,
+                        disable_preprocess_model=args.disable_preprocess_model
+                    )
+                except RuntimeError as e:
+                    # Avoid cuda_out_of_memory error.
+                    print(f"{file_path}, error: {e}")
+                    continue
+                pred, stage1_pred = preds[0], stage1_preds[0]
+                # remove padding
+                pred = pred[:lq_resized.height, :lq_resized.width, :]
+                stage1_pred = stage1_pred[:lq_resized.height, :lq_resized.width, :]
+                if args.show_lq:
+                    if args.resize_back:
+                        if lq_resized.size != lq.size:
+                            pred = np.array(Image.fromarray(pred).resize(lq.size, Image.LANCZOS))
+                            stage1_pred = np.array(Image.fromarray(stage1_pred).resize(lq.size, Image.LANCZOS))
+                        lq = np.array(lq)
+                    else:
+                        lq = np.array(lq_resized)
+                    images = [lq, pred] if args.disable_preprocess_model else [lq, stage1_pred, pred]
+                    Image.fromarray(np.concatenate(images, axis=1)).save(save_path)
+                else:
+                    if args.resize_back and lq_resized.size != lq.size:
+                        Image.fromarray(pred).resize(lq.size, Image.LANCZOS).save(save_path)
+                    else:
+                        Image.fromarray(pred).save(save_path)
+                print(f"save to {save_path}")
 
 if __name__ == "__main__":
     main()
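The substantive change in this hunk is wrapping the sampling loop in `torch.autocast`. As a standalone illustration (not code from this repository), the snippet below shows what that context manager does: operations inside it run in reduced precision on the selected device, which lowers activation memory during inference. The toy `Linear` layer and tensor shapes are assumptions used only for the demonstration.

```python
import torch

# Standalone illustration of torch.autocast, unrelated to DiffBIR's models:
# forward passes inside the context run in mixed precision.
device = "cuda" if torch.cuda.is_available() else "cpu"
layer = torch.nn.Linear(16, 16).to(device)
x = torch.randn(1, 16, device=device)

with torch.autocast(device):
    y = layer(x)

# On CUDA the result is typically float16; on CPU autocast uses bfloat16.
print(y.dtype)
```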