Commit d118e789 authored by Rayyyyy

First commit.
# flake8: noqa
import os.path as osp
from basicsr.train import train_pipeline
import realesrgan.archs
import realesrgan.data
import realesrgan.models
if __name__ == '__main__':
    root_path = osp.abspath(osp.join(__file__, osp.pardir, osp.pardir))
    train_pipeline(root_path)
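
For context, train_pipeline is basicsr's generic training entry point: it parses a YAML option file, builds the datasets, model, and losses that the file describes, and runs the training loop. A typical launch from the repo root might look like the following sketch (the option file name is an assumption based on the upstream Real-ESRGAN training docs):

    # Hypothetical invocation; pick the option file that matches your experiment:
    #   python realesrgan/train.py -opt options/train_realesrgan_x4plus.yml --auto_resume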
import cv2
import math
import numpy as np
import os
import queue
import threading
import torch
from basicsr.utils.download_util import load_file_from_url
from torch.nn import functional as F
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
class RealESRGANer():
    """A helper class for upsampling images with RealESRGAN.

    Args:
        scale (int): Upsampling scale factor used in the networks. It is usually 2 or 4.
        model_path (str): The path to the pretrained model. It can be a URL (the model will first be downloaded
            automatically).
        model (nn.Module): The defined network. Default: None.
        tile (int): Since large images can cause GPU out-of-memory issues, this tile option first crops the input
            image into tiles, processes each tile, and finally merges them back into one image.
            0 means tiling is not used. Default: 0.
        tile_pad (int): The pad size for each tile, to remove border artifacts. Default: 10.
        pre_pad (int): Pad the input images to avoid border artifacts. Default: 10.
        half (bool): Whether to use half precision during inference. Default: False.
    """
    def __init__(self,
                 scale,
                 model_path,
                 dni_weight=None,
                 model=None,
                 tile=0,
                 tile_pad=10,
                 pre_pad=10,
                 half=False,
                 device=None,
                 gpu_id=None):
        self.scale = scale
        self.tile_size = tile
        self.tile_pad = tile_pad
        self.pre_pad = pre_pad
        self.mod_scale = None
        self.half = half

        # initialize model
        if gpu_id:
            self.device = torch.device(
                f'cuda:{gpu_id}' if torch.cuda.is_available() else 'cpu') if device is None else device
        else:
            self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if device is None else device

        if isinstance(model_path, list):
            # dni (deep network interpolation)
            assert len(model_path) == len(dni_weight), 'model_path and dni_weight should have the same length.'
            loadnet = self.dni(model_path[0], model_path[1], dni_weight)
        else:
            # if the model_path starts with https, first download the model to the folder: weights
            if model_path.startswith('https://'):
                model_path = load_file_from_url(
                    url=model_path, model_dir=os.path.join(ROOT_DIR, 'weights'), progress=True, file_name=None)
            loadnet = torch.load(model_path, map_location=torch.device('cpu'))

        # prefer to use params_ema
        if 'params_ema' in loadnet:
            keyname = 'params_ema'
        else:
            keyname = 'params'
        model.load_state_dict(loadnet[keyname], strict=True)

        model.eval()
        self.model = model.to(self.device)
        if self.half:
            self.model = self.model.half()
    def dni(self, net_a, net_b, dni_weight, key='params', loc='cpu'):
        """Deep network interpolation.

        ``Paper: Deep Network Interpolation for Continuous Imagery Effect Transition``
        """
        net_a = torch.load(net_a, map_location=torch.device(loc))
        net_b = torch.load(net_b, map_location=torch.device(loc))
        for k, v_a in net_a[key].items():
            net_a[key][k] = dni_weight[0] * v_a + dni_weight[1] * net_b[key][k]
        return net_a
    def pre_process(self, img):
        """Pre-process, such as pre_pad and mod pad, so that the image sizes are divisible by the scale.
        """
        img = torch.from_numpy(np.transpose(img, (2, 0, 1))).float()
        self.img = img.unsqueeze(0).to(self.device)
        if self.half:
            self.img = self.img.half()

        # pre_pad
        if self.pre_pad != 0:
            self.img = F.pad(self.img, (0, self.pre_pad, 0, self.pre_pad), 'reflect')
        # mod pad for divisible borders
        if self.scale == 2:
            self.mod_scale = 2
        elif self.scale == 1:
            self.mod_scale = 4
        if self.mod_scale is not None:
            self.mod_pad_h, self.mod_pad_w = 0, 0
            _, _, h, w = self.img.size()
            if (h % self.mod_scale != 0):
                self.mod_pad_h = (self.mod_scale - h % self.mod_scale)
            if (w % self.mod_scale != 0):
                self.mod_pad_w = (self.mod_scale - w % self.mod_scale)
            self.img = F.pad(self.img, (0, self.mod_pad_w, 0, self.mod_pad_h), 'reflect')
    def process(self):
        # model inference
        self.output = self.model(self.img)
    def tile_process(self):
        """It will first crop the input image into tiles, and then process each tile.
        Finally, all the processed tiles are merged into one image.

        Modified from: https://github.com/ata4/esrgan-launcher
        """
        batch, channel, height, width = self.img.shape
        output_height = height * self.scale
        output_width = width * self.scale
        output_shape = (batch, channel, output_height, output_width)

        # start with black image
        self.output = self.img.new_zeros(output_shape)
        tiles_x = math.ceil(width / self.tile_size)
        tiles_y = math.ceil(height / self.tile_size)

        # loop over all tiles
        for y in range(tiles_y):
            for x in range(tiles_x):
                # extract tile from input image
                ofs_x = x * self.tile_size
                ofs_y = y * self.tile_size
                # input tile area on total image
                input_start_x = ofs_x
                input_end_x = min(ofs_x + self.tile_size, width)
                input_start_y = ofs_y
                input_end_y = min(ofs_y + self.tile_size, height)

                # input tile area on total image with padding
                input_start_x_pad = max(input_start_x - self.tile_pad, 0)
                input_end_x_pad = min(input_end_x + self.tile_pad, width)
                input_start_y_pad = max(input_start_y - self.tile_pad, 0)
                input_end_y_pad = min(input_end_y + self.tile_pad, height)

                # input tile dimensions
                input_tile_width = input_end_x - input_start_x
                input_tile_height = input_end_y - input_start_y
                tile_idx = y * tiles_x + x + 1
                input_tile = self.img[:, :, input_start_y_pad:input_end_y_pad, input_start_x_pad:input_end_x_pad]

                # upscale tile
                try:
                    with torch.no_grad():
                        output_tile = self.model(input_tile)
                except RuntimeError as error:
                    print('Error', error)
                print(f'\tTile {tile_idx}/{tiles_x * tiles_y}')

                # output tile area on total image
                output_start_x = input_start_x * self.scale
                output_end_x = input_end_x * self.scale
                output_start_y = input_start_y * self.scale
                output_end_y = input_end_y * self.scale

                # output tile area without padding
                output_start_x_tile = (input_start_x - input_start_x_pad) * self.scale
                output_end_x_tile = output_start_x_tile + input_tile_width * self.scale
                output_start_y_tile = (input_start_y - input_start_y_pad) * self.scale
                output_end_y_tile = output_start_y_tile + input_tile_height * self.scale

                # put tile into output image
                self.output[:, :, output_start_y:output_end_y,
                            output_start_x:output_end_x] = output_tile[:, :, output_start_y_tile:output_end_y_tile,
                                                                       output_start_x_tile:output_end_x_tile]
    def post_process(self):
        # remove extra pad
        if self.mod_scale is not None:
            _, _, h, w = self.output.size()
            self.output = self.output[:, :, 0:h - self.mod_pad_h * self.scale, 0:w - self.mod_pad_w * self.scale]
        # remove prepad
        if self.pre_pad != 0:
            _, _, h, w = self.output.size()
            self.output = self.output[:, :, 0:h - self.pre_pad * self.scale, 0:w - self.pre_pad * self.scale]
        return self.output
    @torch.no_grad()
    def enhance(self, img, outscale=None, alpha_upsampler='realesrgan'):
        h_input, w_input = img.shape[0:2]
        # img: numpy
        img = img.astype(np.float32)
        if np.max(img) > 256:  # 16-bit image
            max_range = 65535
            print('\tInput is a 16-bit image')
        else:
            max_range = 255
        img = img / max_range
        if len(img.shape) == 2:  # gray image
            img_mode = 'L'
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
        elif img.shape[2] == 4:  # RGBA image with alpha channel
            img_mode = 'RGBA'
            alpha = img[:, :, 3]
            img = img[:, :, 0:3]
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            if alpha_upsampler == 'realesrgan':
                alpha = cv2.cvtColor(alpha, cv2.COLOR_GRAY2RGB)
        else:
            img_mode = 'RGB'
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        # ------------------- process image (without the alpha channel) ------------------- #
        self.pre_process(img)
        if self.tile_size > 0:
            self.tile_process()
        else:
            self.process()
        output_img = self.post_process()
        output_img = output_img.data.squeeze().float().cpu().clamp_(0, 1).numpy()
        output_img = np.transpose(output_img[[2, 1, 0], :, :], (1, 2, 0))
        if img_mode == 'L':
            output_img = cv2.cvtColor(output_img, cv2.COLOR_BGR2GRAY)

        # ------------------- process the alpha channel if necessary ------------------- #
        if img_mode == 'RGBA':
            if alpha_upsampler == 'realesrgan':
                self.pre_process(alpha)
                if self.tile_size > 0:
                    self.tile_process()
                else:
                    self.process()
                output_alpha = self.post_process()
                output_alpha = output_alpha.data.squeeze().float().cpu().clamp_(0, 1).numpy()
                output_alpha = np.transpose(output_alpha[[2, 1, 0], :, :], (1, 2, 0))
                output_alpha = cv2.cvtColor(output_alpha, cv2.COLOR_BGR2GRAY)
            else:  # use the cv2 resize for alpha channel
                h, w = alpha.shape[0:2]
                output_alpha = cv2.resize(alpha, (w * self.scale, h * self.scale), interpolation=cv2.INTER_LINEAR)

            # merge the alpha channel
            output_img = cv2.cvtColor(output_img, cv2.COLOR_BGR2BGRA)
            output_img[:, :, 3] = output_alpha

        # ------------------------------ return ------------------------------ #
        if max_range == 65535:  # 16-bit image
            output = (output_img * 65535.0).round().astype(np.uint16)
        else:
            output = (output_img * 255.0).round().astype(np.uint8)

        if outscale is not None and outscale != float(self.scale):
            output = cv2.resize(
                output, (
                    int(w_input * outscale),
                    int(h_input * outscale),
                ), interpolation=cv2.INTER_LANCZOS4)

        return output, img_mode
class PrefetchReader(threading.Thread):
    """Prefetch images.

    Args:
        img_list (list[str]): A list of image paths to be read.
        num_prefetch_queue (int): Size of the prefetch queue.
    """

    def __init__(self, img_list, num_prefetch_queue):
        super().__init__()
        self.que = queue.Queue(num_prefetch_queue)
        self.img_list = img_list

    def run(self):
        for img_path in self.img_list:
            img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
            self.que.put(img)
        self.que.put(None)

    def __next__(self):
        next_item = self.que.get()
        if next_item is None:
            raise StopIteration
        return next_item

    def __iter__(self):
        return self
class IOConsumer(threading.Thread):

    def __init__(self, opt, que, qid):
        super().__init__()
        self._queue = que
        self.qid = qid
        self.opt = opt

    def run(self):
        while True:
            msg = self._queue.get()
            if isinstance(msg, str) and msg == 'quit':
                break

            output = msg['output']
            save_path = msg['save_path']
            cv2.imwrite(save_path, output)
        print(f'IO worker {self.qid} is done.')
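
As a usage sketch for the class above (not part of this file): the RRDBNet configuration and the weight URL below are assumptions matching the publicly released x4plus model, so treat this as illustrative rather than canonical.

    import cv2
    from basicsr.archs.rrdbnet_arch import RRDBNet

    from realesrgan.utils import RealESRGANer

    # x4plus generator; hyper-parameters assumed from the public release
    model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
    upsampler = RealESRGANer(
        scale=4,
        model_path='https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth',
        model=model,
        tile=400,  # bound GPU memory by tiling; 0 disables tiling
        tile_pad=10,
        pre_pad=0,
        half=False)

    img = cv2.imread('input.png', cv2.IMREAD_UNCHANGED)  # hypothetical input path
    output, img_mode = upsampler.enhance(img, outscale=4)
    cv2.imwrite('output.png', output)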
basicsr>=1.4.2
facexlib>=0.2.5
gfpgan>=1.3.5
numpy
opencv-python==4.9.0.80
Pillow
torch>=1.7
torchvision
tqdm
scipy==1.10.1
scikit-image
tb-nightly
import argparse
import cv2
import numpy as np
import os
import sys
from basicsr.utils import scandir
from multiprocessing import Pool
from os import path as osp
from tqdm import tqdm
def main(args):
    """A multiprocessing tool to crop large images into sub-images for faster IO.

    opt (dict): Configuration dict. It contains:
        n_thread (int): Thread number.
        compression_level (int): CV_IMWRITE_PNG_COMPRESSION from 0 to 9. A higher value means a smaller size
            and longer compression time. Use 0 for faster CPU decompression. Default: 3, same as in cv2.
        input_folder (str): Path to the input folder.
        save_folder (str): Path to the save folder.
        crop_size (int): Crop size.
        step (int): Step for the overlapped sliding window.
        thresh_size (int): Threshold size. Patches whose size is smaller than thresh_size will be dropped.

    Usage:
        For each folder, run this script.
        Typically, there are a GT folder and an LQ folder to be processed for the DIV2K dataset.
        After processing, each sub-folder should have the same number of sub-images.
        Remember to modify the opt configurations according to your settings.
    """

    opt = {}
    opt['n_thread'] = args.n_thread
    opt['compression_level'] = args.compression_level
    opt['input_folder'] = args.input
    opt['save_folder'] = args.output
    opt['crop_size'] = args.crop_size
    opt['step'] = args.step
    opt['thresh_size'] = args.thresh_size
    extract_subimages(opt)
def extract_subimages(opt):
    """Crop images to sub-images.

    Args:
        opt (dict): Configuration dict. It contains:
            input_folder (str): Path to the input folder.
            save_folder (str): Path to the save folder.
            n_thread (int): Thread number.
    """
    input_folder = opt['input_folder']
    save_folder = opt['save_folder']
    if not osp.exists(save_folder):
        os.makedirs(save_folder)
        print(f'mkdir {save_folder} ...')
    else:
        print(f'Folder {save_folder} already exists. Exit.')
        sys.exit(1)

    # scan all images
    img_list = list(scandir(input_folder, full_path=True))

    pbar = tqdm(total=len(img_list), unit='image', desc='Extract')
    pool = Pool(opt['n_thread'])
    for path in img_list:
        pool.apply_async(worker, args=(path, opt), callback=lambda arg: pbar.update(1))
    pool.close()
    pool.join()
    pbar.close()
    print('All processes done.')
def worker(path, opt):
    """Worker for each process.

    Args:
        path (str): Image path.
        opt (dict): Configuration dict. It contains:
            crop_size (int): Crop size.
            step (int): Step for the overlapped sliding window.
            thresh_size (int): Threshold size. Patches whose size is smaller than thresh_size will be dropped.
            save_folder (str): Path to the save folder.
            compression_level (int): for cv2.IMWRITE_PNG_COMPRESSION.

    Returns:
        process_info (str): Process information displayed in the progress bar.
    """
    crop_size = opt['crop_size']
    step = opt['step']
    thresh_size = opt['thresh_size']
    img_name, extension = osp.splitext(osp.basename(path))

    # remove the x2, x3, x4 and x8 in the filename for DIV2K
    img_name = img_name.replace('x2', '').replace('x3', '').replace('x4', '').replace('x8', '')

    img = cv2.imread(path, cv2.IMREAD_UNCHANGED)

    h, w = img.shape[0:2]
    h_space = np.arange(0, h - crop_size + 1, step)
    if h - (h_space[-1] + crop_size) > thresh_size:
        h_space = np.append(h_space, h - crop_size)
    w_space = np.arange(0, w - crop_size + 1, step)
    if w - (w_space[-1] + crop_size) > thresh_size:
        w_space = np.append(w_space, w - crop_size)

    index = 0
    for x in h_space:
        for y in w_space:
            index += 1
            cropped_img = img[x:x + crop_size, y:y + crop_size, ...]
            cropped_img = np.ascontiguousarray(cropped_img)
            cv2.imwrite(
                osp.join(opt['save_folder'], f'{img_name}_s{index:03d}{extension}'), cropped_img,
                [cv2.IMWRITE_PNG_COMPRESSION, opt['compression_level']])
    process_info = f'Processing {img_name} ...'
    return process_info
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', type=str, default='datasets/DF2K/DF2K_HR', help='Input folder')
    parser.add_argument('--output', type=str, default='datasets/DF2K/DF2K_HR_sub', help='Output folder')
    parser.add_argument('--crop_size', type=int, default=480, help='Crop size')
    parser.add_argument('--step', type=int, default=240, help='Step for overlapped sliding window')
    parser.add_argument(
        '--thresh_size',
        type=int,
        default=0,
        help='Threshold size. Patches whose size is smaller than thresh_size will be dropped.')
    parser.add_argument('--n_thread', type=int, default=20, help='Thread number.')
    parser.add_argument('--compression_level', type=int, default=3, help='Compression level')
    args = parser.parse_args()

    main(args)
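
To make worker()'s sliding-window arithmetic concrete, here is a minimal, self-contained sketch with the default crop_size=480 and step=240 on a hypothetical 1500-pixel-high image; the extra append covers the bottom strip that the regular grid would otherwise miss:

    import numpy as np

    h, crop_size, step, thresh_size = 1500, 480, 240, 0
    h_space = np.arange(0, h - crop_size + 1, step)  # [0 240 480 720 960]
    if h - (h_space[-1] + crop_size) > thresh_size:  # 1500 - 1440 = 60 > 0
        h_space = np.append(h_space, h - crop_size)  # append 1020 for the last row of crops
    print(h_space)  # [0 240 480 720 960 1020]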
import argparse
import cv2
import glob
import os
def main(args):
    txt_file = open(args.meta_info, 'w')
    for folder, root in zip(args.input, args.root):
        img_paths = sorted(glob.glob(os.path.join(folder, '*')))
        for img_path in img_paths:
            status = True
            if args.check:
                # read the image once to check it, as some images may have errors
                img = None
                try:
                    img = cv2.imread(img_path)
                except (IOError, OSError) as error:
                    print(f'Read {img_path} error: {error}')
                    status = False
                if img is None:
                    status = False
                    print(f'Img is None: {img_path}')
            if status:
                # get the relative path
                img_name = os.path.relpath(img_path, root)
                print(img_name)
                txt_file.write(f'{img_name}\n')
if __name__ == '__main__':
    """Generate meta info (txt file) for only Ground-Truth images.

    It can also generate meta info from several folders into one txt file.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--input',
        nargs='+',
        default=['datasets/DF2K/DF2K_HR', 'datasets/DF2K/DF2K_multiscale'],
        help='Input folder, can be a list')
    parser.add_argument(
        '--root',
        nargs='+',
        default=['datasets/DF2K', 'datasets/DF2K'],
        help='Folder root, should have the same length as input folders')
    parser.add_argument(
        '--meta_info',
        type=str,
        default='datasets/DF2K/meta_info/meta_info_DF2Kmultiscale.txt',
        help='txt path for meta info')
    parser.add_argument('--check', action='store_true', help='Read image to check whether it is ok')
    args = parser.parse_args()

    assert len(args.input) == len(args.root), ('Input folder and folder root should have the same length, but got '
                                               f'{len(args.input)} and {len(args.root)}.')
    os.makedirs(os.path.dirname(args.meta_info), exist_ok=True)

    main(args)
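
With the defaults above, the resulting meta info txt simply lists one root-relative path per line; hypothetical entries:

    DF2K_HR/0001.png
    DF2K_multiscale/0001T0.png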
import argparse
import glob
import os
def main(args):
    txt_file = open(args.meta_info, 'w')
    # scan images
    img_paths_gt = sorted(glob.glob(os.path.join(args.input[0], '*')))
    img_paths_lq = sorted(glob.glob(os.path.join(args.input[1], '*')))

    assert len(img_paths_gt) == len(img_paths_lq), ('GT folder and LQ folder should have the same length, but got '
                                                    f'{len(img_paths_gt)} and {len(img_paths_lq)}.')

    for img_path_gt, img_path_lq in zip(img_paths_gt, img_paths_lq):
        # get the relative paths
        img_name_gt = os.path.relpath(img_path_gt, args.root[0])
        img_name_lq = os.path.relpath(img_path_lq, args.root[1])
        print(f'{img_name_gt}, {img_name_lq}')
        txt_file.write(f'{img_name_gt}, {img_name_lq}\n')
if __name__ == '__main__':
    """This script is used to generate meta info (txt file) for paired images.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--input',
        nargs='+',
        default=['datasets/DF2K/DIV2K_train_HR_sub', 'datasets/DF2K/DIV2K_train_LR_bicubic_X4_sub'],
        help='Input folder, should be [gt_folder, lq_folder]')
    parser.add_argument(
        '--root',
        nargs='+',
        default=[None, None],
        help='Folder root; if None, the parent folder of the corresponding input is used')
    parser.add_argument(
        '--meta_info',
        type=str,
        default='datasets/DF2K/meta_info/meta_info_DIV2K_sub_pair.txt',
        help='txt path for meta info')
    args = parser.parse_args()

    assert len(args.input) == 2, 'Input folder should have two elements: gt folder and lq folder'
    assert len(args.root) == 2, 'Root path should have two elements: root for gt folder and lq folder'
    os.makedirs(os.path.dirname(args.meta_info), exist_ok=True)
    for i in range(2):
        if args.input[i].endswith('/'):
            args.input[i] = args.input[i][:-1]
        if args.root[i] is None:
            args.root[i] = os.path.dirname(args.input[i])

    main(args)
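
Each line of the paired meta info holds the GT and LQ relative paths, comma-separated; a hypothetical entry:

    DIV2K_train_HR_sub/0001_s001.png, DIV2K_train_LR_bicubic_X4_sub/0001_s001.png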
import argparse
import glob
import os
from PIL import Image
def main(args):
    # For DF2K, we consider the following three scales,
    # plus the smallest image whose shortest edge is 400
    scale_list = [0.75, 0.5, 1 / 3]
    shortest_edge = 400

    path_list = sorted(glob.glob(os.path.join(args.input, '*')))
    for path in path_list:
        print(path)
        basename = os.path.splitext(os.path.basename(path))[0]

        img = Image.open(path)
        width, height = img.size
        for idx, scale in enumerate(scale_list):
            print(f'\t{scale:.2f}')
            rlt = img.resize((int(width * scale), int(height * scale)), resample=Image.LANCZOS)
            rlt.save(os.path.join(args.output, f'{basename}T{idx}.png'))

        # save the smallest image, whose shortest edge is 400
        if width < height:
            ratio = height / width
            width = shortest_edge
            height = int(width * ratio)
        else:
            ratio = width / height
            height = shortest_edge
            width = int(height * ratio)
        rlt = img.resize((int(width), int(height)), resample=Image.LANCZOS)
        rlt.save(os.path.join(args.output, f'{basename}T{idx+1}.png'))
if __name__ == '__main__':
    """Generate multi-scale versions of GT images with LANCZOS resampling.
    It is now used for the DF2K dataset (DIV2K + Flickr2K).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', type=str, default='datasets/DF2K/DF2K_HR', help='Input folder')
    parser.add_argument('--output', type=str, default='datasets/DF2K/DF2K_multiscale', help='Output folder')
    args = parser.parse_args()

    os.makedirs(args.output, exist_ok=True)
    main(args)
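
As a worked example of the sizes this script emits, assume a hypothetical 2040x1356 GT image; the following sketch reproduces the resize arithmetic from main():

    width, height = 2040, 1356  # hypothetical DF2K-sized image
    for idx, scale in enumerate([0.75, 0.5, 1 / 3]):
        print(f'T{idx}: {int(width * scale)}x{int(height * scale)}')  # T0 1530x1017, T1 1020x678, T2 680x452
    # shortest-edge-400 version (width > height here)
    print(f'T3: {int(400 * width / height)}x400')  # T3: 601x400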
import argparse
import torch
import torch.onnx
from basicsr.archs.rrdbnet_arch import RRDBNet
def main(args):
    # An instance of the model
    model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
    if args.params:
        keyname = 'params'
    else:
        keyname = 'params_ema'
    model.load_state_dict(torch.load(args.input)[keyname])
    # set the train mode to false since we will only run the forward pass
    model.train(False)
    model.cpu().eval()

    # An example input
    x = torch.rand(1, 3, 64, 64)
    # Export the model
    with torch.no_grad():
        torch_out = torch.onnx._export(model, x, args.output, opset_version=11, export_params=True)
    print(torch_out.shape)
if __name__ == '__main__':
    """Convert a PyTorch model to an ONNX model."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--input', type=str, default='experiments/pretrained_models/RealESRGAN_x4plus.pth', help='Input model path')
    parser.add_argument('--output', type=str, default='realesrgan-x4.onnx', help='Output onnx path')
    parser.add_argument('--params', action='store_true', help='Use params instead of params_ema')
    args = parser.parse_args()

    main(args)
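
As an optional sanity check of the exported graph (an assumption: onnxruntime is not listed in this repo's requirements), the following sketch runs the ONNX model once and confirms the x4 output shape:

    import numpy as np
    import onnxruntime as ort

    sess = ort.InferenceSession('realesrgan-x4.onnx')
    x = np.random.rand(1, 3, 64, 64).astype(np.float32)
    out = sess.run(None, {sess.get_inputs()[0].name: x})[0]
    print(out.shape)  # expect (1, 3, 256, 256) for the x4 model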
[flake8]
ignore =
    # line break before binary operator (W503)
    W503,
    # line break after binary operator (W504)
    W504,
max-line-length = 120
[yapf]
based_on_style = pep8
column_limit = 120
blank_line_before_nested_class_or_def = true
split_before_expression_after_opening_paren = true
[isort]
line_length = 120
multi_line_output = 0
known_standard_library = pkg_resources,setuptools
known_first_party = realesrgan
known_third_party = PIL,basicsr,cv2,numpy,pytest,torch,torchvision,tqdm,yaml
no_lines_before = STDLIB,LOCALFOLDER
default_section = THIRDPARTY
[codespell]
skip = .git,./docs/build
count =
quiet-level = 3
[aliases]
test=pytest
[tool:pytest]
addopts=tests/
#!/usr/bin/env python
from setuptools import find_packages, setup
import os
import subprocess
import time
version_file = 'realesrgan/version.py'
def readme():
    with open('README.md', encoding='utf-8') as f:
        content = f.read()
    return content


def get_git_hash():

    def _minimal_ext_cmd(cmd):
        # construct minimal environment
        env = {}
        for k in ['SYSTEMROOT', 'PATH', 'HOME']:
            v = os.environ.get(k)
            if v is not None:
                env[k] = v
        # LANGUAGE is used on win32
        env['LANGUAGE'] = 'C'
        env['LANG'] = 'C'
        env['LC_ALL'] = 'C'
        out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
        return out

    try:
        out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
        sha = out.strip().decode('ascii')
    except OSError:
        sha = 'unknown'

    return sha


def get_hash():
    if os.path.exists('.git'):
        sha = get_git_hash()[:7]
    else:
        sha = 'unknown'

    return sha
def write_version_py():
    content = """# GENERATED VERSION FILE
# TIME: {}
__version__ = '{}'
__gitsha__ = '{}'
version_info = ({})
"""
    sha = get_hash()
    with open('VERSION', 'r') as f:
        SHORT_VERSION = f.read().strip()
    VERSION_INFO = ', '.join([x if x.isdigit() else f'"{x}"' for x in SHORT_VERSION.split('.')])

    version_file_str = content.format(time.asctime(), SHORT_VERSION, sha, VERSION_INFO)
    with open(version_file, 'w') as f:
        f.write(version_file_str)


def get_version():
    with open(version_file, 'r') as f:
        exec(compile(f.read(), version_file, 'exec'))
    return locals()['__version__']


def get_requirements(filename='requirements.txt'):
    here = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(here, filename), 'r') as f:
        requires = [line.replace('\n', '') for line in f.readlines()]
    return requires
if __name__ == '__main__':
    write_version_py()
    setup(
        name='realesrgan',
        version=get_version(),
        description='Real-ESRGAN aims at developing Practical Algorithms for General Image Restoration',
        long_description=readme(),
        long_description_content_type='text/markdown',
        author='Xintao Wang',
        author_email='xintao.wang@outlook.com',
        keywords='computer vision, pytorch, image restoration, super-resolution, esrgan, real-esrgan',
        url='https://github.com/xinntao/Real-ESRGAN',
        include_package_data=True,
        packages=find_packages(exclude=('options', 'datasets', 'experiments', 'results', 'tb_logger', 'wandb')),
        classifiers=[
            'Development Status :: 4 - Beta',
            'License :: OSI Approved :: Apache Software License',
            'Operating System :: OS Independent',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3.8',
        ],
        license='BSD-3-Clause License',
        setup_requires=['cython', 'numpy'],
        install_requires=get_requirements(),
        zip_safe=False)
baboon.png (480,500,3) 1
comic.png (360,240,3) 1
baboon.png (120,125,3) 1
comic.png (80,60,3) 1