# NOTE: concatenated fastreid deployment tools (commit b6c19984, author dengjb).
# Original files: caffe_export.py, caffe_inference.py, onnx_export.py,
# onnx_inference.py, pytorch_to_caffe.py.
# encoding: utf-8
"""
@author: xingyu liao
@contact: sherlockliao01@gmail.com
"""
import argparse
import logging
import sys
import torch
sys.path.append('.')
import pytorch_to_caffe
from fastreid.config import get_cfg
from fastreid.modeling.meta_arch import build_model
from fastreid.utils.file_io import PathManager
from fastreid.utils.checkpoint import Checkpointer
from fastreid.utils.logger import setup_logger
# import some modules added in project like this below
# sys.path.append("projects/PartialReID")
# from partialreid import *
setup_logger(name='fastreid')
logger = logging.getLogger("fastreid.caffe_export")
def setup_cfg(args):
    """Build a frozen fastreid config from the parsed CLI arguments.

    Args:
        args: argparse namespace providing ``config_file`` and ``opts``.

    Returns:
        A frozen ``CfgNode``: file overrides applied first, then the
        explicit ``KEY VALUE`` pairs from ``--opts``.
    """
    config = get_cfg()
    config.merge_from_file(args.config_file)
    config.merge_from_list(args.opts)
    config.freeze()
    return config
def get_parser():
    """Build the CLI parser for the PyTorch -> Caffe conversion tool."""
    parser = argparse.ArgumentParser(description="Convert Pytorch to Caffe model")
    parser.add_argument("--config-file", metavar="FILE", help="path to config file")
    parser.add_argument("--name", default="baseline", help="name for converted model")
    parser.add_argument("--output", default='caffe_model', help='path to save converted caffe model')
    # REMAINDER: everything after --opts is forwarded verbatim to the config.
    parser.add_argument(
        "--opts",
        help="Modify config options using the command-line 'KEY VALUE' pairs",
        default=[],
        nargs=argparse.REMAINDER,
    )
    return parser
if __name__ == '__main__':
    args = get_parser().parse_args()
    cfg = setup_cfg(args)
    # Patch the config for deployment: skip pretrained-weight download, make
    # pooling a no-op (done outside the net) and drop Non-Local blocks, which
    # the caffe converter cannot express.
    cfg.defrost()
    cfg.MODEL.BACKBONE.PRETRAIN = False
    cfg.MODEL.HEADS.POOL_LAYER = "Identity"
    cfg.MODEL.BACKBONE.WITH_NL = False
    model = build_model(cfg)
    Checkpointer(model).load(cfg.MODEL.WEIGHTS)
    model.eval()
    logger.info(model)
    # Dummy batch of size 1 at the test resolution; SIZE_TEST is (h, w).
    inputs = torch.randn(1, 3, cfg.INPUT.SIZE_TEST[0], cfg.INPUT.SIZE_TEST[1]).to(torch.device(cfg.MODEL.DEVICE))
    PathManager.mkdirs(args.output)
    # Trace the model and emit the caffe prototxt + caffemodel pair.
    pytorch_to_caffe.trans_net(model, inputs, args.name)
    pytorch_to_caffe.save_prototxt(f"{args.output}/{args.name}.prototxt")
    pytorch_to_caffe.save_caffemodel(f"{args.output}/{args.name}.caffemodel")
    logger.info(f"Export caffe model in {args.output} sucessfully!")
# encoding: utf-8
"""
@author: xingyu liao
@contact: sherlockliao01@gmail.com
"""
import caffe
import tqdm
import glob
import os
import cv2
import numpy as np
caffe.set_mode_gpu()
import argparse
def get_parser():
    """Build the CLI parser for caffe-model feature extraction."""
    parser = argparse.ArgumentParser(description="Caffe model inference")
    parser.add_argument("--model-def", default="logs/test_caffe/baseline_R50.prototxt", help="caffe model prototxt")
    parser.add_argument("--model-weights", default="logs/test_caffe/baseline_R50.caffemodel", help="caffe model weights")
    parser.add_argument(
        "--input",
        nargs="+",
        help="A list of space separated input images; "
             "or a single glob pattern such as 'directory/*.jpg'",
    )
    parser.add_argument("--output", default='caffe_output', help='path to save converted caffe model')
    parser.add_argument("--height", type=int, default=256, help="height of image")
    parser.add_argument("--width", type=int, default=128, help="width of image")
    return parser
def preprocess(image_path, image_height, image_width):
    """Read an image and turn it into a normalized (1, 3, h, w) float32 array.

    Matches the ImageNet mean/std preprocessing (values scaled to 0-255).
    """
    bgr = cv2.imread(image_path)
    # the model expects RGB inputs; OpenCV loads BGR, so flip the channel axis
    rgb = bgr[:, :, ::-1]
    resized = cv2.resize(rgb, (image_width, image_height), interpolation=cv2.INTER_CUBIC)
    chw = resized.astype("float32").transpose(2, 0, 1)[np.newaxis]  # (1, 3, h, w)
    mean = np.array([0.485 * 255, 0.456 * 255, 0.406 * 255]).reshape((1, -1, 1, 1))
    std = np.array([0.229 * 255, 0.224 * 255, 0.225 * 255]).reshape((1, -1, 1, 1))
    return (chw - mean) / std
def normalize(nparray, order=2, axis=-1):
    """Normalize a N-D numpy array along the specified axis.

    A small epsilon keeps the division safe for all-zero rows.
    """
    denom = np.linalg.norm(nparray, ord=order, axis=axis, keepdims=True)
    denom = denom + np.finfo(np.float32).eps
    return nparray / denom
if __name__ == "__main__":
    args = get_parser().parse_args()
    net = caffe.Net(args.model_def, args.model_weights, caffe.TEST)
    # 'blob1' is the input blob name assigned by the pytorch_to_caffe exporter.
    net.blobs['blob1'].reshape(1, 3, args.height, args.width)
    if not os.path.exists(args.output): os.makedirs(args.output)
    if args.input:
        # A single directory/glob argument expands to the matched file list.
        if os.path.isdir(args.input[0]):
            args.input = glob.glob(os.path.expanduser(args.input[0]))
            assert args.input, "The input path(s) was not found"
        for path in tqdm.tqdm(args.input):
            image = preprocess(path, args.height, args.width)
            net.blobs["blob1"].data[...] = image
            feat = net.forward()["output"]
            # drop the trailing 1x1 spatial dims, then L2-normalize each row
            feat = normalize(feat[..., 0, 0], axis=1)
            np.save(os.path.join(args.output, os.path.basename(path).split('.')[0] + '.npy'), feat)
# encoding: utf-8
"""
@author: xingyu liao
@contact: sherlockliao01@gmail.com
"""
import logging
import os
import argparse
import io
import sys
import onnx
import onnxoptimizer
import torch
from onnxsim import simplify
from torch.onnx import OperatorExportTypes
sys.path.append('.')
from fastreid.config import get_cfg
from fastreid.modeling.meta_arch import build_model
from fastreid.utils.file_io import PathManager
from fastreid.utils.checkpoint import Checkpointer
from fastreid.utils.logger import setup_logger
# import some modules added in project like this below
# sys.path.append("projects/FastDistill")
# from fastdistill import *
setup_logger(name="fastreid")
logger = logging.getLogger("fastreid.onnx_export")
def setup_cfg(args):
    """Assemble the run config: defaults + config file + CLI overrides, frozen."""
    merged = get_cfg()
    merged.merge_from_file(args.config_file)
    merged.merge_from_list(args.opts)
    # freeze so later stages must defrost() explicitly before mutating
    merged.freeze()
    return merged
def get_parser():
    """Build the CLI parser for the PyTorch -> ONNX conversion tool."""
    parser = argparse.ArgumentParser(description="Convert Pytorch to ONNX model")
    parser.add_argument("--config-file", metavar="FILE", help="path to config file")
    parser.add_argument("--name", default="baseline", help="name for converted model")
    parser.add_argument("--output", default='onnx_model', help='path to save converted onnx model')
    parser.add_argument('--batch-size', default=1, type=int, help="the maximum batch size of onnx runtime")
    # REMAINDER: everything after --opts is handed to the config verbatim.
    parser.add_argument(
        "--opts",
        help="Modify config options using the command-line 'KEY VALUE' pairs",
        default=[],
        nargs=argparse.REMAINDER,
    )
    return parser
def remove_initializer_from_input(model):
    """Remove graph initializers from the graph's input list.

    ONNX IR version >= 4 allows initializers to be absent from the inputs;
    stripping them prevents runtimes from expecting them as feeds.

    Args:
        model: an onnx ModelProto (or any object with ``ir_version`` and
            ``graph.input`` / ``graph.initializer``).

    Returns:
        The same model object. BUG FIX: the old code returned ``None`` for
        ``ir_version < 4``, so the caller's ``model_simp = remove_...(model_simp)``
        silently destroyed the model; now the model is returned unchanged.
    """
    if model.ir_version < 4:
        print(
            'Model with ir_version below 4 requires to include initilizer in graph input'
        )
        return model
    inputs = model.graph.input
    name_to_input = {inp.name: inp for inp in inputs}
    for initializer in model.graph.initializer:
        if initializer.name in name_to_input:
            inputs.remove(name_to_input[initializer.name])
    return model
def export_onnx_model(model, inputs):
    """
    Trace and export a model to onnx format, then run onnxoptimizer passes.
    Args:
        model (nn.Module): must already be in eval mode.
        inputs (torch.Tensor): the model will be called by `model(*inputs)`
    Returns:
        an onnx model (ModelProto) with constants extracted, unused
        initializers removed and BatchNorm folded into Conv.
    """
    assert isinstance(model, torch.nn.Module)

    # make sure all modules are in eval mode, onnx may change the training state
    # of the module if the states are not consistent
    def _check_eval(module):
        assert not module.training

    model.apply(_check_eval)

    logger.info("Beginning ONNX file converting")
    # Export the model to ONNX (in-memory buffer, then parse it back)
    with torch.no_grad():
        with io.BytesIO() as f:
            torch.onnx.export(
                model,
                inputs,
                f,
                # fall back to ATen ops for anything ONNX cannot express
                operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK,
                # verbose=True,  # NOTE: uncomment this for debugging
                # export_params=True,
            )
            onnx_model = onnx.load_from_string(f.getvalue())

    logger.info("Completed convert of ONNX model")

    # Apply ONNX's Optimization
    logger.info("Beginning ONNX model path optimization")
    all_passes = onnxoptimizer.get_available_passes()
    passes = ["extract_constant_to_initializer", "eliminate_unused_initializer", "fuse_bn_into_conv"]
    # guard against onnxoptimizer versions that renamed/dropped a pass
    assert all(p in all_passes for p in passes)
    onnx_model = onnxoptimizer.optimize(onnx_model, passes)
    logger.info("Completed ONNX model path optimization")
    return onnx_model
if __name__ == '__main__':
    args = get_parser().parse_args()
    cfg = setup_cfg(args)
    # Patch the config for export: no pretrain download, and swap the custom
    # FastGlobalAvgPool for the ONNX-exportable GlobalAvgPool.
    cfg.defrost()
    cfg.MODEL.BACKBONE.PRETRAIN = False
    if cfg.MODEL.HEADS.POOL_LAYER == 'FastGlobalAvgPool':
        cfg.MODEL.HEADS.POOL_LAYER = 'GlobalAvgPool'
    model = build_model(cfg)
    Checkpointer(model).load(cfg.MODEL.WEIGHTS)
    # Re-parameterizable backbones (e.g. RepVGG) switch to inference form here.
    if hasattr(model.backbone, 'deploy'):
        model.backbone.deploy(True)
    model.eval()
    logger.info(model)
    # Dummy batch at test resolution; SIZE_TEST is (h, w).
    inputs = torch.randn(args.batch_size, 3, cfg.INPUT.SIZE_TEST[0], cfg.INPUT.SIZE_TEST[1]).to(model.device)
    onnx_model = export_onnx_model(model, inputs)
    model_simp, check = simplify(onnx_model)
    # BUG FIX: validate the simplifier's result BEFORE mutating/saving the
    # model (previously the assert ran after remove_initializer_from_input).
    assert check, "Simplified ONNX model could not be validated"
    model_simp = remove_initializer_from_input(model_simp)
    PathManager.mkdirs(args.output)
    save_path = os.path.join(args.output, args.name + '.onnx')
    onnx.save_model(model_simp, save_path)
    logger.info("ONNX model file has already saved to {}!".format(save_path))
# encoding: utf-8
"""
@author: xingyu liao
@contact: sherlockliao01@gmail.com
"""
import argparse
import glob
import os
import cv2
import numpy as np
import onnxruntime
import tqdm
def get_parser():
    """Build the CLI parser for ONNX-model feature extraction."""
    parser = argparse.ArgumentParser(description="onnx model inference")
    parser.add_argument("--model-path", default="onnx_model/baseline.onnx", help="onnx model path")
    parser.add_argument(
        "--input",
        nargs="+",
        help="A list of space separated input images; "
             "or a single glob pattern such as 'directory/*.jpg'",
    )
    parser.add_argument("--output", default='onnx_output', help='path to save converted caffe model')
    parser.add_argument("--height", type=int, default=256, help="height of image")
    parser.add_argument("--width", type=int, default=128, help="width of image")
    return parser
def preprocess(image_path, image_height, image_width):
    """Read an image into a (1, 3, h, w) float32 RGB array.

    Unlike the caffe-inference variant, no mean/std normalization happens
    here — the exported ONNX graph performs it internally.
    """
    bgr = cv2.imread(image_path)
    # the model expects RGB inputs; OpenCV gives BGR, so reverse the channels
    rgb = bgr[:, :, ::-1]
    resized = cv2.resize(rgb, (image_width, image_height), interpolation=cv2.INTER_CUBIC)
    return resized.astype("float32").transpose(2, 0, 1)[np.newaxis]  # (1, 3, h, w)
def normalize(nparray, order=2, axis=-1):
    """Normalize a N-D numpy array along the specified axis (eps-protected)."""
    eps = np.finfo(np.float32).eps
    length = np.linalg.norm(nparray, ord=order, axis=axis, keepdims=True)
    return nparray / (length + eps)
if __name__ == "__main__":
    args = get_parser().parse_args()
    ort_sess = onnxruntime.InferenceSession(args.model_path)
    input_name = ort_sess.get_inputs()[0].name
    if not os.path.exists(args.output): os.makedirs(args.output)
    if args.input:
        # A single directory/glob argument expands to the matched file list.
        if os.path.isdir(args.input[0]):
            args.input = glob.glob(os.path.expanduser(args.input[0]))
            assert args.input, "The input path(s) was not found"
        for path in tqdm.tqdm(args.input):
            image = preprocess(path, args.height, args.width)
            feat = ort_sess.run(None, {input_name: image})[0]
            # L2-normalize each embedding row before saving
            feat = normalize(feat, axis=1)
            # BUG FIX: the old `path.replace('.jpg', '.npy').split('/')[-1]`
            # only worked for .jpg files and POSIX separators; use os.path.
            stem = os.path.splitext(os.path.basename(path))[0]
            np.save(os.path.join(args.output, stem + '.npy'), feat)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import traceback
from Caffe import caffe_net
import torch.nn.functional as F
from torch.autograd import Variable
from Caffe import layer_param
from torch.nn.modules.utils import _pair
import numpy as np
import math
from torch.nn.modules.utils import _list_with_default
"""
How to support a new layer type:
layer_name=log.add_layer(layer_type_name)
top_blobs=log.add_blobs(<output of that layer>)
layer=caffe_net.Layer_param(xxx)
<set layer parameters>
[<layer.add_data(*datas)>]
log.cnet.add_layer(layer)
Please MUTE the inplace operations to avoid not find in graph
"""
# TODO: support the inplace output of the layers
class Blob_LOG():
    """Dict-like registry mapping blob ids to their caffe blob names."""

    def __init__(self):
        # backing store: id(tensor) -> assigned caffe blob name
        self.data = {}

    def __setitem__(self, key, value):
        self.data.__setitem__(key, value)

    def __getitem__(self, key):
        return self.data.__getitem__(key)

    def __len__(self):
        return self.data.__len__()
# While False, every wrapped op below falls through to the raw torch call;
# presumably flipped to True by the (not shown here) trans_net entry point.
NET_INITTED = False
# How the conversion works: intercept each torch op call, run the real
# computation, and record an equivalent caffe layer as a side effect.
class TransLog(object):
    """Recorder that accumulates caffe layers/blobs while a model runs.

    Attributes:
        layers: generated layer name -> itself (acts as a "seen" set).
        detail_layers / detail_blobs: per-prefix counters for unique naming.
        _blobs: id(tensor) -> blob name mapping (Blob_LOG).
        _blobs_data: strong refs to logged tensors (keeps ids stable).
        cnet: the caffe net under construction.
        debug: when True, prints every layer/blob registration.
    """

    def __init__(self):
        """
        doing init() with inputs Variable before using it
        """
        self.layers = {}
        self.detail_layers = {}
        self.detail_blobs = {}
        self._blobs = Blob_LOG()
        self._blobs_data = []
        self.cnet = caffe_net.Caffemodel('')
        self.debug = True

    def init(self, inputs):
        """
        :param inputs: is a list of input variables
        """
        self.add_blobs(inputs)

    def add_layer(self, name='layer'):
        # Returns an existing name unchanged, otherwise mints "<name><count>".
        if name in self.layers:
            return self.layers[name]
        if name not in self.detail_layers.keys():
            self.detail_layers[name] = 0
        self.detail_layers[name] += 1
        name = '{}{}'.format(name, self.detail_layers[name])
        self.layers[name] = name
        if self.debug:
            print("{} was added to layers".format(self.layers[name]))
        return self.layers[name]

    def add_blobs(self, blobs, name='blob', with_num=True):
        # Registers each tensor under "<name><count>" (or plain <name> when
        # with_num is False) and returns the list of assigned names.
        rst = []
        for blob in blobs:
            self._blobs_data.append(blob)  # to block the memory address be rewrited
            blob_id = int(id(blob))
            if name not in self.detail_blobs.keys():
                self.detail_blobs[name] = 0
            self.detail_blobs[name] += 1
            if with_num:
                rst.append('{}{}'.format(name, self.detail_blobs[name]))
            else:
                rst.append('{}'.format(name))
            if self.debug:
                print("{}:{} was added to blobs".format(blob_id, rst[-1]))
                print('Add blob {} : {}'.format(rst[-1].center(21), blob.size()))
            self._blobs[blob_id] = rst[-1]
        return rst

    def blobs(self, var):
        # Looks up the caffe blob name for a tensor; None if unregistered.
        var = id(var)
        if self.debug:
            # NOTE(review): this debug print indexes _blobs before the try
            # below, so an unknown blob raises KeyError here when debug is on.
            print("{}:{} getting".format(var, self._blobs[var]))
        try:
            return self._blobs[var]
        except:
            print("WARNING: CANNOT FOUND blob {}".format(var))
            return None
# Module-wide recorder shared by every wrapped op below.
log = TransLog()
# nn.Module instance -> dotted layer name; presumably filled by the
# (not shown here) trans_net entry point and consumed in Rp.__call__.
layer_names = {}
def _conv2d(raw, input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
    """Run the real F.conv2d, then record a caffe Convolution layer with its weights."""
    x = raw(input, weight, bias, stride, padding, dilation, groups)
    name = log.add_layer(name='conv')
    log.add_blobs([x], name='conv_blob')
    layer = caffe_net.Layer_param(name=name, type='Convolution',
                                  bottom=[log.blobs(input)], top=[log.blobs(x)])
    # x.size()[1] = output channels; weight.size()[2:] = (kh, kw)
    layer.conv_param(x.size()[1], weight.size()[2:], stride=_pair(stride),
                     pad=_pair(padding), dilation=_pair(dilation), bias_term=bias is not None, groups=groups)
    if bias is not None:
        layer.add_data(weight.cpu().data.numpy(), bias.cpu().data.numpy())
    else:
        layer.param.convolution_param.bias_term = False
        layer.add_data(weight.cpu().data.numpy())
    log.cnet.add_layer(layer)
    return x
def _conv_transpose2d(raw, input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1):
    """Run the real F.conv_transpose2d, then record a caffe Deconvolution layer."""
    x = raw(input, weight, bias, stride, padding, output_padding, groups, dilation)
    name = log.add_layer(name='conv_transpose')
    log.add_blobs([x], name='conv_transpose_blob')
    layer = caffe_net.Layer_param(name=name, type='Deconvolution',
                                  bottom=[log.blobs(input)], top=[log.blobs(x)])
    # NOTE(review): unlike _conv2d, `groups` and `output_padding` are not
    # forwarded to conv_param — confirm grouped/padded deconvs never occur.
    layer.conv_param(x.size()[1], weight.size()[2:], stride=_pair(stride),
                     pad=_pair(padding), dilation=_pair(dilation), bias_term=bias is not None)
    if bias is not None:
        layer.add_data(weight.cpu().data.numpy(), bias.cpu().data.numpy())
    else:
        layer.param.convolution_param.bias_term = False
        layer.add_data(weight.cpu().data.numpy())
    log.cnet.add_layer(layer)
    return x
def _linear(raw, input, weight, bias=None):
    """Run the real F.linear, then record a caffe InnerProduct layer."""
    x = raw(input, weight, bias)
    layer_name = log.add_layer(name='fc')
    top_blobs = log.add_blobs([x], name='fc_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='InnerProduct',
                                  bottom=[log.blobs(input)], top=top_blobs)
    # x.size()[1] = output feature dimension
    layer.fc_param(x.size()[1], has_bias=bias is not None)
    if bias is not None:
        layer.add_data(weight.cpu().data.numpy(), bias.cpu().data.numpy())
    else:
        layer.add_data(weight.cpu().data.numpy())
    log.cnet.add_layer(layer)
    return x
def _split(raw, tensor, split_size, dim=0):
    """Record torch.split as a caffe Slice layer.

    split in pytorch is slice in caffe: slice points are placed at every
    multiple of split_size along `dim`.
    """
    x = raw(tensor, split_size, dim)
    layer_name = log.add_layer('split')
    top_blobs = log.add_blobs(x, name='split_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Slice',
                                  bottom=[log.blobs(tensor)], top=top_blobs)
    slice_num = int(np.floor(tensor.size()[dim] / split_size))
    slice_param = caffe_net.pb.SliceParameter(axis=dim, slice_point=[split_size * i for i in range(1, slice_num)])
    layer.param.slice_param.CopyFrom(slice_param)
    log.cnet.add_layer(layer)
    return x
def _pool(type, raw, input, x, kernel_size, stride, padding, ceil_mode):
    """Shared recorder for max/avg pooling: emits one caffe Pooling layer.

    `type` is 'max' or 'ave'; `x` is the already-computed pytorch output.
    """
    # TODO dilation,ceil_mode,return indices
    layer_name = log.add_layer(name='{}_pool'.format(type))
    top_blobs = log.add_blobs([x], name='{}_pool_blob'.format(type))
    layer = caffe_net.Layer_param(name=layer_name, type='Pooling', bottom=[log.blobs(input)], top=top_blobs)
    # TODO w,h different kernel, stride and padding
    # processing ceil mode
    layer.pool_param(kernel_size=kernel_size, stride=kernel_size if stride is None else stride,
                     pad=padding, type=type.upper())
    log.cnet.add_layer(layer)
    if ceil_mode == False and stride is not None:
        # caffe pools with ceil while pytorch floors: warn when the residue of
        # the sliding window makes the two output shapes differ.
        oheight = (input.size()[2] - _pair(kernel_size)[0] + 2 * _pair(padding)[0]) % (_pair(stride)[0])
        owidth = (input.size()[3] - _pair(kernel_size)[1] + 2 * _pair(padding)[1]) % (_pair(stride)[1])
        if oheight != 0 or owidth != 0:
            caffe_out = raw(input, kernel_size, stride, padding, ceil_mode=False)
            print("WARNING: the output shape miss match at {}: "
                  "input {} output---Pytorch:{}---Caffe:{}\n"
                  "This is caused by the different implementation that ceil mode in caffe and the floor mode in pytorch.\n"
                  "You can add the clip layer in caffe prototxt manually if shape mismatch error is caused in caffe. ".format(
                layer_name, input.size(), x.size(), caffe_out.size()))
def _max_pool2d(raw, input, kernel_size, stride=None, padding=0, dilation=1,
                ceil_mode=False, return_indices=False):
    """Run the real F.max_pool2d and record it via the shared _pool helper.

    NOTE(review): the positional argument order passed to `raw` matches older
    torch signatures (dilation before ceil_mode) — confirm against the torch
    version in use.
    """
    x = raw(input, kernel_size, stride, padding, dilation, ceil_mode, return_indices)
    _pool('max', raw, input, x, kernel_size, stride, padding, ceil_mode)
    return x
def _avg_pool2d(raw, input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True):
    """Run the real F.avg_pool2d and record it via the shared _pool helper."""
    x = raw(input, kernel_size, stride, padding, ceil_mode, count_include_pad)
    _pool('ave', raw, input, x, kernel_size, stride, padding, ceil_mode)
    return x
def _max(raw, *args):
    """Record an element-wise torch.max over several tensors as caffe Eltwise MAX.

    Raises:
        NotImplementedError: for the single-tensor reduction form of torch.max,
            which has no caffe Eltwise equivalent.
    """
    x = raw(*args)
    if len(args) == 1:
        # BUG FIX: this was `assert NotImplementedError`, which is always
        # truthy — the reduction silently skipped layer recording and produced
        # a caffe net with the op missing. Fail loudly instead.
        raise NotImplementedError("torch.max over a single tensor is not supported in caffe export")
    bottom_blobs = []
    for arg in args:
        bottom_blobs.append(log.blobs(arg))
    layer_name = log.add_layer(name='max')
    top_blobs = log.add_blobs([x], name='max_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Eltwise',
                                  bottom=bottom_blobs, top=top_blobs)
    layer.param.eltwise_param.operation = 2  # 2 == MAX in caffe's EltwiseParameter
    log.cnet.add_layer(layer)
    return x
def _cat(raw, inputs, dimension=0):
    """Record torch.cat as a caffe Concat layer along `dimension`."""
    x = raw(inputs, dimension)
    bottom_blobs = []
    for input in inputs:
        bottom_blobs.append(log.blobs(input))
    layer_name = log.add_layer(name='cat')
    top_blobs = log.add_blobs([x], name='cat_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Concat',
                                  bottom=bottom_blobs, top=top_blobs)
    layer.param.concat_param.axis = dimension
    log.cnet.add_layer(layer)
    return x
def _dropout(raw, input, p=0.5, training=False, inplace=False):
    """Record F.dropout as a caffe Dropout layer (train-phase only).

    The top blob reuses the bottom's name so downstream layers connect
    through the dropout transparently at test time.
    """
    x = raw(input, p, training, inplace)
    bottom_blobs = [log.blobs(input)]
    layer_name = log.add_layer(name='dropout')
    top_blobs = log.add_blobs([x], name=bottom_blobs[0], with_num=False)
    layer = caffe_net.Layer_param(name=layer_name, type='Dropout',
                                  bottom=bottom_blobs, top=top_blobs)
    layer.param.dropout_param.dropout_ratio = p
    layer.param.include.extend([caffe_net.pb.NetStateRule(phase=0)])  # 1 for test, 0 for train
    log.cnet.add_layer(layer)
    return x
def _threshold(raw, input, threshold, value, inplace=False):
    """Record F.threshold as caffe ReLU (threshold==value==0) or Threshold.

    Args:
        raw: the original, un-patched F.threshold.
        input: input tensor, already registered in the blob log.
        threshold, value, inplace: see torch.nn.functional.threshold.

    Returns:
        The tensor produced by the raw op.

    Raises:
        NotImplementedError: when value != 0, which caffe cannot express.
    """
    # for threshold or relu
    if threshold == 0 and value == 0:
        # threshold(x, 0, 0) is exactly ReLU
        x = raw(input, threshold, value, inplace)
        bottom_blobs = [log.blobs(input)]
        name = log.add_layer(name='relu')
        log.add_blobs([x], name='relu_blob')
        layer = caffe_net.Layer_param(name=name, type='ReLU',
                                      bottom=bottom_blobs, top=[log.blobs(x)])
        log.cnet.add_layer(layer)
        return x
    if value != 0:
        # BUG FIX: was `raise NotImplemented(...)` — NotImplemented is a
        # sentinel, not an exception, so that line raised TypeError instead.
        raise NotImplementedError("value !=0 not implemented in caffe")
    # BUG FIX: the raw op used to be called as raw(input, input, threshold,
    # value, inplace), passing `input` twice and shifting every argument.
    x = raw(input, threshold, value, inplace)
    bottom_blobs = [log.blobs(input)]
    layer_name = log.add_layer(name='threshold')
    top_blobs = log.add_blobs([x], name='threshold_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Threshold',
                                  bottom=bottom_blobs, top=top_blobs)
    layer.param.threshold_param.threshold = threshold
    log.cnet.add_layer(layer)
    return x
def _relu(raw, input, inplace=False):
    """Record F.relu as a caffe ReLU layer.

    The raw op is always called with inplace=False so the output is a new
    tensor that can be tracked in the blob log.
    """
    x = raw(input, False)
    name = log.add_layer(name='relu')
    log.add_blobs([x], name='relu_blob')
    layer = caffe_net.Layer_param(name=name, type='ReLU',
                                  bottom=[log.blobs(input)], top=[log.blobs(x)])
    log.cnet.add_layer(layer)
    return x
def _prelu(raw, input, weight):
    """Record F.prelu as a caffe PReLU layer (channel-shared when weight has one entry)."""
    x = raw(input, weight)
    bottom_blobs = [log.blobs(input)]
    name = log.add_layer(name='prelu')
    log.add_blobs([x], name='prelu_blob')
    layer = caffe_net.Layer_param(name=name, type='PReLU',
                                  bottom=bottom_blobs, top=[log.blobs(x)])
    if weight.size()[0] == 1:
        layer.param.prelu_param.channel_shared = True
        layer.add_data(weight.cpu().data.numpy()[0])
    else:
        layer.add_data(weight.cpu().data.numpy())
    log.cnet.add_layer(layer)
    return x
def _leaky_relu(raw, input, negative_slope=0.01, inplace=False):
    """Record F.leaky_relu as a caffe ReLU layer with a negative slope."""
    x = raw(input, negative_slope)
    name = log.add_layer(name='leaky_relu')
    log.add_blobs([x], name='leaky_relu_blob')
    layer = caffe_net.Layer_param(name=name, type='ReLU',
                                  bottom=[log.blobs(input)], top=[log.blobs(x)])
    layer.param.relu_param.negative_slope = negative_slope
    log.cnet.add_layer(layer)
    return x
def _tanh(raw, input):
    """Record torch tanh as a caffe TanH layer."""
    x = raw(input)
    name = log.add_layer(name='tanh')
    log.add_blobs([x], name='tanh_blob')
    layer = caffe_net.Layer_param(name=name, type='TanH',
                                  bottom=[log.blobs(input)], top=[log.blobs(x)])
    log.cnet.add_layer(layer)
    return x
def _softmax(raw, input, dim=None, _stacklevel=3):
    """Record F.softmax as a caffe Softmax layer on the given axis."""
    x = raw(input, dim=dim)
    if dim is None:
        # mirror pytorch's implicit-dim resolution for legacy callers
        dim = F._get_softmax_dim('softmax', input.dim(), _stacklevel)
    bottom_blobs = [log.blobs(input)]
    name = log.add_layer(name='softmax')
    log.add_blobs([x], name='softmax_blob')
    layer = caffe_net.Layer_param(name=name, type='Softmax',
                                  bottom=bottom_blobs, top=[log.blobs(x)])
    layer.param.softmax_param.axis = dim
    log.cnet.add_layer(layer)
    return x
def _sigmoid(raw, input):
    """Record torch.sigmoid as a caffe Sigmoid layer."""
    x = raw(input)
    name = log.add_layer(name='Sigmoid')
    log.add_blobs([x], name='Sigmoid_blob')
    layer = caffe_net.Layer_param(name=name, type='Sigmoid',
                                  bottom=[log.blobs(input)], top=[log.blobs(x)])
    log.cnet.add_layer(layer)
    return x
def _batch_norm(raw, input, running_mean, running_var, weight=None, bias=None,
                training=False, momentum=0.1, eps=1e-5):
    """Record F.batch_norm as caffe BatchNorm (+ Scale when affine).

    Caffe splits batch norm into a parameter-free BatchNorm layer and an
    affine Scale layer, so up to two layers are emitted.
    """
    # because the runing_mean and runing_var will be changed after the _batch_norm operation, we first save the parameters
    x = raw(input, running_mean, running_var, weight, bias,
            training, momentum, eps)
    bottom_blobs = [log.blobs(input)]
    layer_name1 = log.add_layer(name='batch_norm')
    top_blobs = log.add_blobs([x], name='batch_norm_blob')
    layer1 = caffe_net.Layer_param(name=layer_name1, type='BatchNorm',
                                   bottom=bottom_blobs, top=top_blobs)
    if running_mean is None or running_var is None:
        # not use global_stats, normalization is performed over the current mini-batch
        layer1.batch_norm_param(use_global_stats=0, eps=eps)
    else:
        layer1.batch_norm_param(use_global_stats=1, eps=eps)
        running_mean_clone = running_mean.clone()
        running_var_clone = running_var.clone()
        # third blob is caffe's scale factor; 1.0 means stats are used as-is
        layer1.add_data(running_mean_clone.cpu().numpy(), running_var_clone.cpu().numpy(), np.array([1.0]))
    log.cnet.add_layer(layer1)
    if weight is not None and bias is not None:
        # affine part: in-place Scale layer on the BatchNorm output
        layer_name2 = log.add_layer(name='bn_scale')
        layer2 = caffe_net.Layer_param(name=layer_name2, type='Scale',
                                       bottom=top_blobs, top=top_blobs)
        layer2.param.scale_param.bias_term = True
        layer2.add_data(weight.cpu().data.numpy(), bias.cpu().data.numpy())
        log.cnet.add_layer(layer2)
    return x
def _instance_norm(raw, input, running_mean=None, running_var=None, weight=None,
                   bias=None, use_input_stats=True, momentum=0.1, eps=1e-5):
    """Record F.instance_norm as caffe BatchNorm (+ Scale when affine).

    Only valid for batch size 1, where instance norm and batch norm coincide.
    """
    # TODO: the batch size!=1 view operations
    print("WARNING: The Instance Normalization transfers to Caffe using BatchNorm, so the batch size should be 1")
    if running_var is not None or weight is not None:
        # TODO: the affine=True or track_running_stats=True case
        raise NotImplementedError("not implement the affine=True or track_running_stats=True case InstanceNorm")
    x = torch.batch_norm(
        input, weight, bias, running_mean, running_var,
        use_input_stats, momentum, eps, torch.backends.cudnn.enabled)
    bottom_blobs = [log.blobs(input)]
    layer_name1 = log.add_layer(name='instance_norm')
    top_blobs = log.add_blobs([x], name='instance_norm_blob')
    layer1 = caffe_net.Layer_param(name=layer_name1, type='BatchNorm',
                                   bottom=bottom_blobs, top=top_blobs)
    if running_mean is None or running_var is None:
        # not use global_stats, normalization is performed over the current mini-batch
        layer1.batch_norm_param(use_global_stats=0, eps=eps)
        # synthesize neutral stats so the caffe layer still carries blobs
        running_mean = torch.zeros(input.size()[1])
        running_var = torch.ones(input.size()[1])
    else:
        layer1.batch_norm_param(use_global_stats=1, eps=eps)
    running_mean_clone = running_mean.clone()
    running_var_clone = running_var.clone()
    layer1.add_data(running_mean_clone.cpu().numpy(), running_var_clone.cpu().numpy(), np.array([1.0]))
    log.cnet.add_layer(layer1)
    if weight is not None and bias is not None:
        # unreachable today given the NotImplementedError guard above, kept
        # for the future affine case
        layer_name2 = log.add_layer(name='bn_scale')
        layer2 = caffe_net.Layer_param(name=layer_name2, type='Scale',
                                       bottom=top_blobs, top=top_blobs)
        layer2.param.scale_param.bias_term = True
        layer2.add_data(weight.cpu().data.numpy(), bias.cpu().data.numpy())
        log.cnet.add_layer(layer2)
    return x
# upsample layer
def _interpolate(raw, input, size=None, scale_factor=None, mode='nearest', align_corners=None):
    # (translated) The Upsample-related parameters are: scale (output/input
    # size ratio, e.g. 2); scale_h / scale_w (per-direction ratios);
    # pad_out_h / pad_out_w (extra output padding, only meaningful when scale
    # is 2); and upsample_h / upsample_w (exact output sizes). Only
    # upsample_h / upsample_w are recommended to pin the output size; the
    # other parameters are discouraged.
    '''
    if mode == 'bilinear':
        x = raw(input, size, scale_factor, mode)
        name = log.add_layer(name='conv_transpose')
        log.add_blobs([x], name='conv_transpose_blob')
        layer = caffe_net.Layer_param(name=name, type='Deconvolution',
                                      bottom=[log.blobs(input)], top=[log.blobs(x)])
        print('Deconv: ', name)
        print(input.shape)
        print(x.size())
        print(size)
        factor = float(size[0]) / input.shape[2]
        C = x.size()[1]
        print(factor,C)
        kernel_size = int(2 * factor - factor % 2)
        stride = int(factor)
        num_output = C
        group = C
        pad = math.ceil((factor-1) / 2.)
        print('kernel_size, stride, num_output, group, pad')
        print(kernel_size, stride, num_output, group, pad)
        layer.conv_param(num_output, kernel_size, stride=stride,
                         pad=pad, weight_filler_type='bilinear', bias_term=False, groups=group)
        layer.param.convolution_param.bias_term = False
        log.cnet.add_layer(layer)
        return x
    '''
    # transfer bilinear align_corners=True to caffe-interp
    if mode == "bilinear" and align_corners == True:
        x = raw(input, size, scale_factor, mode)
        name = log.add_layer(name='interp')
        log.add_blobs([x], name='interp_blob')
        layer = caffe_net.Layer_param(name=name, type='Interp',
                                      bottom=[log.blobs(input)], top=[log.blobs(x)])
        layer.interp_param(size=size, scale_factor=scale_factor)
        log.cnet.add_layer(layer)
        return x
    # for nearest _interpolate
    if mode != "nearest" or align_corners != None:
        raise NotImplementedError("not implement F.interpolate totoaly")
    x = raw(input, size, scale_factor, mode)
    layer_name = log.add_layer(name='upsample')
    top_blobs = log.add_blobs([x], name='upsample_blob'.format(type))
    layer = caffe_net.Layer_param(name=layer_name, type='Upsample',
                                  bottom=[log.blobs(input)], top=top_blobs)
    # NOTE(review): scale_factor is taken from size[0] here — this assumes the
    # caller passed `size` as a uniform scale; confirm against actual callers.
    #layer.upsample_param(size=(input.size(2), input.size(3)), scale_factor=scale_factor)
    #layer.upsample_param(size=size, scale_factor=scale_factor)
    layer.upsample_param(size=None, scale_factor=size[0])
    log.cnet.add_layer(layer)
    return x
# ----- for Variable operations --------
def _view(input, *args):
    """Record Tensor.view as a caffe Reshape layer (batch dim preserved)."""
    x = raw_view(input, *args)
    if not NET_INITTED:
        return x
    layer_name = log.add_layer(name='view')
    top_blobs = log.add_blobs([x], name='view_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Reshape', bottom=[log.blobs(input)], top=top_blobs)
    # TODO: reshpae added to nn_tools layer
    dims = list(args)
    dims[0] = 0  # the first dim should be batch_size; 0 means "copy from bottom" in caffe
    layer.param.reshape_param.shape.CopyFrom(caffe_net.pb.BlobShape(dim=dims))
    log.cnet.add_layer(layer)
    return x
def _mean(input, *args, **kwargs):
    """Record Tensor.mean as a caffe Reduction layer (operation 4 == MEAN)."""
    x = raw_mean(input, *args, **kwargs)
    if not NET_INITTED:
        return x
    layer_name = log.add_layer(name='mean')
    top_blobs = log.add_blobs([x], name='mean_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Reduction',
                                  bottom=[log.blobs(input)], top=top_blobs)
    if len(args) == 1:
        dim = args[0]
    elif 'dim' in kwargs:
        dim = kwargs['dim']
    else:
        raise NotImplementedError('mean operation must specify a dim')
    # NOTE(review): caffe Reduction reduces over ALL axes from `axis` onward,
    # while torch.mean reduces a single dim — equivalent only for trailing dims.
    layer.param.reduction_param.operation = 4
    layer.param.reduction_param.axis = dim
    log.cnet.add_layer(layer)
    return x
def _add(input, *args):
    """Record Tensor.__add__ as caffe Scale (tensor + const int) or Eltwise SUM."""
    # check if add a const value
    if isinstance(args[0], int):
        # NOTE(review): float constants fall through to the Eltwise branch and
        # will fail the blob lookup — confirm only int offsets are expected.
        print('value: ', args[0])
        x = raw__add__(input, *args)
        # constant add becomes a per-channel Scale with unit weights and a
        # constant bias vector
        layer_name = log.add_layer(name='scale')
        log.add_blobs([x], name='Scale_blob')
        layer = caffe_net.Layer_param(name=layer_name, type='Scale',
                                      bottom=[log.blobs(input)], top=[log.blobs(x)])
        dim = x.shape[1]
        layer.param.scale_param.bias_term = True
        weight = np.ones(dim, dtype=np.float32)
        bias = args[0] * np.ones(dim, dtype=np.float32)
        layer.add_data(weight, bias)
        log.cnet.add_layer(layer)
        return x
    # otherwise add a tensor
    x = raw__add__(input, *args)
    if not NET_INITTED:
        return x
    layer_name = log.add_layer(name='add')
    top_blobs = log.add_blobs([x], name='add_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Eltwise',
                                  bottom=[log.blobs(input), log.blobs(args[0])], top=top_blobs)
    layer.param.eltwise_param.operation = 1  # sum is 1
    log.cnet.add_layer(layer)
    return x
def _iadd(input, *args):
    """Record Tensor.__iadd__ as a caffe Eltwise SUM layer.

    The result is cloned so the logged output blob has its own identity
    (in-place ops would otherwise alias the input's id).
    """
    x = raw__iadd__(input, *args)
    if not NET_INITTED:
        return x
    x = x.clone()
    layer_name = log.add_layer(name='add')
    top_blobs = log.add_blobs([x], name='add_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Eltwise',
                                  bottom=[log.blobs(input), log.blobs(args[0])], top=top_blobs)
    layer.param.eltwise_param.operation = 1  # sum is 1
    log.cnet.add_layer(layer)
    return x
def _sub(input, *args):
    """Record Tensor.__sub__ as caffe Eltwise SUM with coefficients (1, -1)."""
    x = raw__sub__(input, *args)
    if not NET_INITTED:
        return x
    layer_name = log.add_layer(name='sub')
    top_blobs = log.add_blobs([x], name='sub_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Eltwise',
                                  bottom=[log.blobs(input), log.blobs(args[0])], top=top_blobs)
    layer.param.eltwise_param.operation = 1  # sum is 1
    # a - b expressed as 1*a + (-1)*b
    layer.param.eltwise_param.coeff.extend([1., -1.])
    log.cnet.add_layer(layer)
    return x
def _isub(input, *args):
    """Record Tensor.__isub__ as a caffe Eltwise SUM layer (cloned output).

    NOTE(review): unlike _sub, no (1, -1) coefficients are set here, so the
    recorded layer sums instead of subtracting — confirm intended.
    """
    x = raw__isub__(input, *args)
    if not NET_INITTED:
        return x
    x = x.clone()
    layer_name = log.add_layer(name='sub')
    top_blobs = log.add_blobs([x], name='sub_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Eltwise',
                                  bottom=[log.blobs(input), log.blobs(args[0])], top=top_blobs)
    layer.param.eltwise_param.operation = 1  # sum is 1
    log.cnet.add_layer(layer)
    return x
def _mul(input, *args):
    """Record Tensor.__mul__ as a caffe Eltwise PROD layer.

    BUG FIX: the original body called ``raw__sub__``, so every ``a * b``
    was actually computed as ``a - b`` during tracing.
    """
    x = raw__mul__(input, *args)
    if not NET_INITTED:
        return x
    layer_name = log.add_layer(name='mul')
    top_blobs = log.add_blobs([x], name='mul_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Eltwise',
                                  bottom=[log.blobs(input), log.blobs(args[0])], top=top_blobs)
    layer.param.eltwise_param.operation = 0  # 0 == PROD in caffe's EltwiseParameter
    log.cnet.add_layer(layer)
    return x
def _imul(input, *args):
    """Record Tensor.__imul__ as a caffe Eltwise PROD layer (cloned output).

    BUG FIXES: the original body called ``raw__isub__`` (so ``a *= b`` was
    computed as ``a -= b``), and it set Eltwise coefficients (1, -1), which
    caffe only supports for SUM and rejects for PROD.
    """
    x = raw__imul__(input, *args)
    if not NET_INITTED:
        return x
    # clone so the logged output blob has its own identity
    x = x.clone()
    layer_name = log.add_layer(name='mul')
    top_blobs = log.add_blobs([x], name='mul_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Eltwise',
                                  bottom=[log.blobs(input), log.blobs(args[0])], top=top_blobs)
    layer.param.eltwise_param.operation = 0  # 0 == PROD in caffe's EltwiseParameter
    log.cnet.add_layer(layer)
    return x
def _adaptive_avg_pool2d(raw, input, output_size):
    """Record F.adaptive_avg_pool2d as a plain caffe AVE Pooling layer.

    NOTE(review): kernel and stride are both set to input.shape[2], i.e. this
    only reproduces global average pooling over square feature maps
    (output_size == 1); confirm other output sizes are never requested.
    """
    _output_size = _list_with_default(output_size, input.size())
    x = raw(input, _output_size)
    _pool('ave', raw, input, x, input.shape[2], input.shape[2], 0, False)
    return x
# Core component: wraps a raw torch function so that each call's inputs,
# outputs and parameters can be read and mirrored into the caffe net.
class Rp(object):
    def __init__(self, raw, replace, **kwargs):
        # replace the raw function to replace function
        self.obj = replace
        self.raw = raw

    def __call__(self, *args, **kwargs):
        # Before the net is initialized, behave exactly like the raw op.
        if not NET_INITTED:
            return self.raw(*args, **kwargs)
        # Walk the call stack to find the nn.Module that triggered this op,
        # so the recorder can tag the layer with its pytorch name.
        for stack in traceback.walk_stack(None):
            if 'self' in stack[0].f_locals:
                layer = stack[0].f_locals['self']
                if layer in layer_names:
                    log.pytorch_layer_name = layer_names[layer]
                    print(layer_names[layer])
                    break
        out = self.obj(self.raw, *args, **kwargs)
        # if isinstance(out,Variable):
        #     out=[out]
        return out
# Monkey-patch the functional API: every F.* / torch.* operator used by the
# model is wrapped in Rp so each call both executes normally in PyTorch and is
# recorded as the corresponding Caffe layer during tracing.
F.conv2d = Rp(F.conv2d, _conv2d)
F.linear = Rp(F.linear, _linear)
F.relu = Rp(F.relu, _relu)
F.leaky_relu = Rp(F.leaky_relu, _leaky_relu)
F.max_pool2d = Rp(F.max_pool2d, _max_pool2d)
F.avg_pool2d = Rp(F.avg_pool2d, _avg_pool2d)
F.dropout = Rp(F.dropout, _dropout)
F.threshold = Rp(F.threshold, _threshold)
F.prelu = Rp(F.prelu, _prelu)
F.batch_norm = Rp(F.batch_norm, _batch_norm)
F.instance_norm = Rp(F.instance_norm, _instance_norm)
F.softmax = Rp(F.softmax, _softmax)
F.conv_transpose2d = Rp(F.conv_transpose2d, _conv_transpose2d)
F.interpolate = Rp(F.interpolate, _interpolate)
F.adaptive_avg_pool2d = Rp(F.adaptive_avg_pool2d, _adaptive_avg_pool2d)
torch.split = Rp(torch.split, _split)
torch.max = Rp(torch.max, _max)
torch.cat = Rp(torch.cat, _cat)
torch.sigmoid = Rp(torch.sigmoid, _sigmoid)
# TODO: other types of the view function
try:
    # Old PyTorch: tensor methods live on torch.autograd.Variable. Save a
    # handle to each raw method so the tracing wrappers can call through.
    raw_view = Variable.view
    Variable.view = _view
    raw_mean = Variable.mean
    Variable.mean = _mean
    raw__add__ = Variable.__add__
    Variable.__add__ = _add
    raw__iadd__ = Variable.__iadd__
    Variable.__iadd__ = _iadd
    raw__sub__ = Variable.__sub__
    Variable.__sub__ = _sub
    raw__isub__ = Variable.__isub__
    Variable.__isub__ = _isub
    raw__mul__ = Variable.__mul__
    Variable.__mul__ = _mul
    raw__imul__ = Variable.__imul__
    Variable.__imul__ = _imul
except Exception:
    # Bug fix: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # are no longer swallowed; the version-compatibility fallback is kept.
    # for new version 0.4.0 and later version
    for t in [torch.Tensor]:
        raw_view = t.view
        t.view = _view
        raw_mean = t.mean
        t.mean = _mean
        raw__add__ = t.__add__
        t.__add__ = _add
        raw__iadd__ = t.__iadd__
        t.__iadd__ = _iadd
        raw__sub__ = t.__sub__
        t.__sub__ = _sub
        raw__isub__ = t.__isub__
        t.__isub__ = _isub
        raw__mul__ = t.__mul__
        t.__mul__ = _mul
        raw__imul__ = t.__imul__
        t.__imul__ = _imul
def trans_net(net, input_var, name='TransferedPytorchModel'):
    """Trace `net` with one forward pass and build the equivalent Caffe net.

    Args:
        net: PyTorch module to convert; must be fully exercised by a single
            forward pass on `input_var`.
        input_var: example input tensor; its blob name and shape become the
            Caffe net input.
        name: network name written into the generated prototxt.
    """
    print('Starting Transform, This will take a while')
    log.init([input_var])
    log.cnet.net.name = name
    log.cnet.net.input.extend([log.blobs(input_var)])
    log.cnet.net.input_dim.extend(input_var.size())
    global NET_INITTED
    NET_INITTED = True
    # Map each module instance to its qualified name so Rp.__call__ can
    # recover the layer name from the call stack.
    # Bug fix: the loop variable no longer shadows the `name` parameter.
    for module_name, module in net.named_modules():
        layer_names[module] = module_name
    print("torch ops name:", layer_names)
    # The forward pass drives the tracing; its return value is not needed.
    net.forward(input_var)
    print('Transform Completed')
def save_prototxt(save_name):
    """Write the traced network definition to `save_name` (.prototxt)."""
    log.cnet.save_prototxt(save_name)
def save_caffemodel(save_name):
    """Write the traced network weights to `save_name` (.caffemodel)."""
    log.cnet.save(save_name)
# encoding: utf-8
"""
@author: xingyu liao
@contact: sherlockliao01@gmail.com
Custom calibrator used to calibrate an int8 TensorRT model.
It overrides several methods of trt.IInt8EntropyCalibrator2: get_batch_size, get_batch,
read_calibration_cache, and write_calibration_cache.
"""
# based on:
# https://github.com/qq995431104/Pytorch2TensorRT/blob/master/myCalibrator.py
import os
import sys
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
import numpy as np
import torchvision.transforms as T
sys.path.append('../..')
from fastreid.data.build import _root
from fastreid.data.data_utils import read_image
from fastreid.data.datasets import DATASET_REGISTRY
import logging
from fastreid.data.transforms import ToTensor
logger = logging.getLogger('trt_export.calibrator')
class FeatEntropyCalibrator(trt.IInt8EntropyCalibrator2):
    """Int8 entropy calibrator that streams reid dataset images to TensorRT.

    Feeds fixed-size image batches from the configured dataset and caches the
    resulting calibration table in `self.cache_file`.
    """

    def __init__(self, args):
        trt.IInt8EntropyCalibrator2.__init__(self)

        self.cache_file = 'reid_feat.cache'

        self.batch_size = args.batch_size
        self.channel = args.channel
        self.height = args.height
        self.width = args.width
        self.transform = T.Compose([
            T.Resize((self.height, self.width), interpolation=3),  # [h,w]
            ToTensor(),
        ])

        # Calibrate over the union of train/query/gallery images, shuffled.
        dataset = DATASET_REGISTRY.get(args.calib_data)(root=_root)
        self._data_items = dataset.train + dataset.query + dataset.gallery
        np.random.shuffle(self._data_items)
        self.imgs = [item[0] for item in self._data_items]

        self.batch_idx = 0
        self.max_batch_idx = len(self.imgs) // self.batch_size
        # Device buffer sized for exactly one full calibration batch.
        self.data_size = self.batch_size * self.channel * self.height * self.width * trt.float32.itemsize
        self.device_input = cuda.mem_alloc(self.data_size)

    def next_batch(self):
        """Return the next batch as a contiguous float32 NCHW array, or an
        empty array once the dataset is exhausted."""
        if self.batch_idx < self.max_batch_idx:
            batch_files = self.imgs[self.batch_idx * self.batch_size:(self.batch_idx + 1) * self.batch_size]
            batch_imgs = np.zeros((self.batch_size, self.channel, self.height, self.width),
                                  dtype=np.float32)
            for i, f in enumerate(batch_files):
                img = read_image(f)
                img = self.transform(img).numpy()
                assert (img.nbytes == self.data_size // self.batch_size), 'not valid img!' + f
                batch_imgs[i] = img
            self.batch_idx += 1
            logger.info("batch:[{}/{}]".format(self.batch_idx, self.max_batch_idx))
            return np.ascontiguousarray(batch_imgs)
        else:
            return np.array([])

    def get_batch_size(self):
        # Required by IInt8EntropyCalibrator2.
        return self.batch_size

    def get_batch(self, names, p_str=None):
        """Copy the next batch to the device; return its pointer, or None
        when calibration data is exhausted (or a batch could not be built)."""
        try:
            batch_imgs = self.next_batch()
            batch_imgs = batch_imgs.ravel()
            if batch_imgs.size == 0 or batch_imgs.size != self.batch_size * self.channel * self.height * self.width:
                return None
            cuda.memcpy_htod(self.device_input, batch_imgs.astype(np.float32))
            return [int(self.device_input)]
        except Exception:
            # Bug fix: was a bare `except:` that silently swallowed every
            # error (including KeyboardInterrupt). Returning None still tells
            # TensorRT to stop, but the failure is now logged.
            logger.exception("Failed to produce a calibration batch")
            return None

    def read_calibration_cache(self):
        # If there is a cache, use it instead of calibrating again. Otherwise, implicitly return None.
        if os.path.exists(self.cache_file):
            with open(self.cache_file, "rb") as f:
                return f.read()

    def write_calibration_cache(self, cache):
        # Persist the calibration table produced by TensorRT.
        with open(self.cache_file, "wb") as f:
            f.write(cache)
# encoding: utf-8
"""
@author: xingyu liao
@contact: sherlockliao01@gmail.com
"""
import argparse
import os
import sys
import tensorrt as trt
from trt_calibrator import FeatEntropyCalibrator
sys.path.append('.')
from fastreid.utils.logger import setup_logger, PathManager
logger = setup_logger(name="trt_export")
def get_parser():
    """Build the command-line parser for ONNX -> TensorRT conversion."""
    parser = argparse.ArgumentParser(description="Convert ONNX to TRT model")
    parser.add_argument(
        '--name',
        default='baseline',
        help="name for converted model"
    )
    parser.add_argument(
        '--output',
        default='outputs/trt_model',
        help="path to save converted trt model"
    )
    parser.add_argument(
        '--mode',
        default='fp32',
        # Reject invalid modes at parse time instead of failing inside onnx2trt.
        choices=['fp32', 'fp16', 'int8'],
        # Bug fix: help string was missing the comma between 'fp16' and 'int8'.
        help="which mode is used in tensorRT engine, mode can be ['fp32', 'fp16', 'int8']"
    )
    parser.add_argument(
        '--batch-size',
        default=1,
        type=int,
        help="the maximum batch size of trt module"
    )
    parser.add_argument(
        '--height',
        default=256,
        type=int,
        help="input image height"
    )
    parser.add_argument(
        '--width',
        default=128,
        type=int,
        help="input image width"
    )
    parser.add_argument(
        '--channel',
        default=3,
        type=int,
        help="input image channel"
    )
    parser.add_argument(
        '--calib-data',
        default='Market1501',
        help="int8 calibrator dataset name"
    )
    parser.add_argument(
        "--onnx-model",
        default='outputs/onnx_model/baseline.onnx',
        help='path to onnx model'
    )
    return parser
def onnx2trt(
        onnx_file_path,
        save_path,
        mode,
        log_level='ERROR',
        max_workspace_size=1,
        strict_type_constraints=False,
        int8_calibrator=None,
):
    """Build a TensorRT engine from an onnx model and serialize it to disk.

    Args:
        onnx_file_path (string or io object): onnx model name
        save_path (string): tensortRT serialization save path
        mode (string): Whether or not FP16 or Int8 kernels are permitted during engine build.
        log_level (string, default is ERROR): tensorrt logger level, now
            INTERNAL_ERROR, ERROR, WARNING, INFO, VERBOSE are support.
        max_workspace_size (int, default is 1): The maximum GPU temporary memory, in GB, which
            the ICudaEngine can use at execution time. default is 1GB.
        strict_type_constraints (bool, default is False): When strict type constraints is set, TensorRT will choose
            the type constraints that conforms to type constraints. If the flag is not enabled higher precision
            implementation may be chosen if it results in higher performance.
        int8_calibrator (volksdep.calibrators.base.BaseCalibrator, default is None): calibrator for int8 mode,
            if None, default calibrator will be used as calibration data.

    Raises:
        RuntimeError: if the ONNX file cannot be parsed.
    """
    mode = mode.lower()
    assert mode in ['fp32', 'fp16', 'int8'], "mode should be in ['fp32', 'fp16', 'int8'], " \
                                             "but got {}".format(mode)
    trt_logger = trt.Logger(getattr(trt.Logger, log_level))
    builder = trt.Builder(trt_logger)
    logger.info("Loading ONNX file from path {}...".format(onnx_file_path))
    EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
    network = builder.create_network(EXPLICIT_BATCH)
    parser = trt.OnnxParser(network, trt_logger)
    if isinstance(onnx_file_path, str):
        with open(onnx_file_path, 'rb') as f:
            logger.info("Beginning ONNX file parsing")
            flag = parser.parse(f.read())
    else:
        flag = parser.parse(onnx_file_path.read())
    if not flag:
        for error in range(parser.num_errors):
            logger.info(parser.get_error(error))
        # Bug fix: previously execution fell through with a broken network and
        # crashed later during the build; fail fast with a clear error.
        raise RuntimeError("Failed to parse ONNX file {}".format(onnx_file_path))
    logger.info("Completed parsing of ONNX file.")

    # re-order output tensor: re-wrap every output in an Identity layer so the
    # serialized engine keeps a deterministic output order.
    output_tensors = [network.get_output(i) for i in range(network.num_outputs)]
    for tensor in output_tensors:
        network.unmark_output(tensor)
    for tensor in output_tensors:
        identity_out_tensor = network.add_identity(tensor).get_output(0)
        identity_out_tensor.name = 'identity_{}'.format(tensor.name)
        network.mark_output(tensor=identity_out_tensor)

    config = builder.create_builder_config()
    # Bug fix: the docstring promises GB units but (1 << 25) is only 32 MB.
    config.max_workspace_size = max_workspace_size * (1 << 30)
    if mode == 'fp16':
        assert builder.platform_has_fast_fp16, "not support fp16"
        # Bug fix: precision flags must be set on the builder config used for
        # the build; builder.fp16_mode was ignored by the config-based path.
        config.set_flag(trt.BuilderFlag.FP16)
    if mode == 'int8':
        assert builder.platform_has_fast_int8, "not support int8"
        config.set_flag(trt.BuilderFlag.INT8)
        config.int8_calibrator = int8_calibrator
    if strict_type_constraints:
        config.set_flag(trt.BuilderFlag.STRICT_TYPES)

    logger.info("Building an engine from file {}; this may take a while...".format(onnx_file_path))
    # Bug fix: build_cuda_engine(network) ignored `config` entirely, so the
    # workspace size and all flags set above never took effect.
    engine = builder.build_engine(network, config)
    logger.info("Create engine successfully!")

    logger.info("Saving TRT engine file to path {}".format(save_path))
    with open(save_path, 'wb') as f:
        f.write(engine.serialize())
    logger.info("Engine file has already saved to {}!".format(save_path))
if __name__ == '__main__':
    args = get_parser().parse_args()

    onnx_file_path = args.onnx_model
    # Serialized engine is written to <output>/<name>.engine
    engineFile = os.path.join(args.output, args.name + '.engine')

    # int8 mode needs a calibrator backed by real dataset images.
    if args.mode.lower() == 'int8':
        int8_calib = FeatEntropyCalibrator(args)
    else:
        int8_calib = None

    PathManager.mkdirs(args.output)
    onnx2trt(onnx_file_path, engineFile, args.mode, int8_calibrator=int8_calib)
# encoding: utf-8
"""
@author: xingyu liao
@contact: sherlockliao01@gmail.com
"""
import argparse
import glob
import os
import cv2
import numpy as np
import pycuda.driver as cuda
import tensorrt as trt
import tqdm
TRT_LOGGER = trt.Logger()
def get_parser():
    """Build the command-line parser for TRT model inference."""
    parser = argparse.ArgumentParser(description="trt model inference")

    parser.add_argument("--model-path",
                        default="outputs/trt_model/baseline.engine",
                        help="trt model path")
    parser.add_argument("--input",
                        nargs="+",
                        help="A list of space separated input images; "
                             "or a single glob pattern such as 'directory/*.jpg'")
    parser.add_argument("--output",
                        default="trt_output",
                        help="path to save trt model inference results")
    parser.add_argument("--batch-size",
                        type=int,
                        default=1,
                        help="the maximum batch size of trt module")
    parser.add_argument("--height",
                        type=int,
                        default=256,
                        help="height of image")
    parser.add_argument("--width",
                        type=int,
                        default=128,
                        help="width of image")
    return parser
class HostDeviceMem(object):
    """Pairs a pagelocked host buffer with its matching device allocation."""

    def __init__(self, host_mem, device_mem):
        self.host = host_mem
        self.device = device_mem

    def __str__(self):
        return f"Host:\n{self.host}\nDevice:\n{self.device}"

    # repr is intentionally identical to str.
    __repr__ = __str__
class TrtEngine:
    """Thin wrapper around a serialized TensorRT engine for feature extraction.

    Owns a dedicated CUDA context (pushed/popped around every inference call)
    so it can coexist with other CUDA users in the same process.
    """

    def __init__(self, trt_file=None, gpu_idx=0, batch_size=1):
        cuda.init()
        self._batch_size = batch_size
        # Dedicated context on the chosen GPU; detached in __del__.
        self._device_ctx = cuda.Device(gpu_idx).make_context()
        self._engine = self._load_engine(trt_file)
        self._context = self._engine.create_execution_context()
        self._input, self._output, self._bindings, self._stream = self._allocate_buffers(self._context)

    def _load_engine(self, trt_file):
        """
        Load tensorrt engine.
        :param trt_file: path to a serialized tensorrt engine file.
        :return:
            ICudaEngine
        """
        with open(trt_file, "rb") as f, \
                trt.Runtime(TRT_LOGGER) as runtime:
            engine = runtime.deserialize_cuda_engine(f.read())
        return engine

    def _allocate_buffers(self, context):
        """
        Allocate pagelocked host and device memory for every engine binding.
        :param context: execution context (not used here; kept for signature stability).
        :return:
            (inputs, outputs, bindings, stream)
        """
        inputs = []
        outputs = []
        bindings = []
        stream = cuda.Stream()
        for binding in self._engine:
            # Size buffers for the engine's maximum batch.
            size = trt.volume(self._engine.get_binding_shape(binding)) * self._engine.max_batch_size
            dtype = trt.nptype(self._engine.get_binding_dtype(binding))
            # Allocate host and device buffers
            host_mem = cuda.pagelocked_empty(size, dtype)
            device_mem = cuda.mem_alloc(host_mem.nbytes)
            # Append the device buffer to device bindings.
            bindings.append(int(device_mem))
            # Append to the appropriate list.
            if self._engine.binding_is_input(binding):
                inputs.append(HostDeviceMem(host_mem, device_mem))
            else:
                outputs.append(HostDeviceMem(host_mem, device_mem))
        return inputs, outputs, bindings, stream

    def infer(self, data):
        """
        Real inference process.
        :param data: Preprocessed data; flattened into the input buffer.
        :return:
            list of output arrays, each reshaped to (batch_size, -1)
        """
        # Copy data to input memory buffer
        [np.copyto(_inp.host, data.ravel()) for _inp in self._input]
        # Push to device
        self._device_ctx.push()
        # Transfer input data to the GPU.
        [cuda.memcpy_htod_async(inp.device, inp.host, self._stream) for inp in self._input]
        # Run inference.
        self._context.execute_async_v2(bindings=self._bindings, stream_handle=self._stream.handle)
        # Transfer predictions back from the GPU.
        [cuda.memcpy_dtoh_async(out.host, out.device, self._stream) for out in self._output]
        # Synchronize the stream
        self._stream.synchronize()
        # Pop the device
        self._device_ctx.pop()
        # NOTE(review): outputs are returned reversed ([::-1]) — presumably to
        # undo the identity-layer reordering applied at engine-build time; confirm.
        return [out.host.reshape(self._batch_size, -1) for out in self._output[::-1]]

    def inference_on_images(self, imgs, new_size=(256, 128)):
        """Extract L2-normalized features for a list of HWC images.

        :param imgs: list of images (HWC arrays as loaded by cv2).
        :param new_size: (height, width) the images are resized to.
        :return: normalized feature array of shape (len(imgs), feat_dim).
        """
        trt_inputs = []
        for img in imgs:
            input_ndarray = self.preprocess(img, *new_size)
            trt_inputs.append(input_ndarray)
        trt_inputs = np.vstack(trt_inputs)
        valid_bsz = trt_inputs.shape[0]
        # Zero-pad up to the fixed engine batch size; padding rows are dropped below.
        if valid_bsz < self._batch_size:
            trt_inputs = np.vstack([trt_inputs, np.zeros((self._batch_size - valid_bsz, 3, *new_size))])

        result, = self.infer(trt_inputs)
        result = result[:valid_bsz]
        feat = self.postprocess(result, axis=1)
        return feat

    @classmethod
    def preprocess(cls, img, img_height, img_width):
        # Apply pre-processing to image: resize, then HWC -> (1, C, H, W) float32.
        resize_img = cv2.resize(img, (img_width, img_height), interpolation=cv2.INTER_CUBIC)
        type_img = resize_img.astype("float32").transpose(2, 0, 1)[np.newaxis]  # (1, 3, h, w)
        return type_img

    @classmethod
    def postprocess(cls, nparray, order=2, axis=-1):
        """Normalize a N-D numpy array along the specified axis."""
        norm = np.linalg.norm(nparray, ord=order, axis=axis, keepdims=True)
        return nparray / (norm + np.finfo(np.float32).eps)

    def __del__(self):
        # NOTE(review): if __init__ raised before these attributes were set,
        # this would itself raise AttributeError at teardown — confirm acceptable.
        del self._input
        del self._output
        del self._stream
        self._device_ctx.detach()  # release device context
if __name__ == "__main__":
    args = get_parser().parse_args()

    # Bug fix: the engine was previously named `trt`, shadowing the
    # `tensorrt as trt` module import for the rest of the script.
    trt_engine = TrtEngine(args.model_path, batch_size=args.batch_size)

    # Idiom: replaces `if not os.path.exists(...): os.makedirs(...)`.
    os.makedirs(args.output, exist_ok=True)

    if args.input:
        # A single directory argument expands to a glob over its contents.
        if os.path.isdir(args.input[0]):
            args.input = glob.glob(os.path.expanduser(args.input[0]))
            assert args.input, "The input path(s) was not found"
        for img_path in tqdm.tqdm(args.input):
            img = cv2.imread(img_path)
            # the model expects RGB inputs
            cvt_img = img[:, :, ::-1]
            feat = trt_engine.inference_on_images([cvt_img])
            # One .npy feature file per input image, named after the image.
            np.save(os.path.join(args.output, os.path.basename(img_path).split('.')[0] + '.npy'), feat)
# encoding: utf-8
"""
@author: xingyu liao
@contact: sherlockliao01@gmail.com
"""
import logging
import os
import sys
from collections import OrderedDict
import torch
from torch.nn.parallel import DistributedDataParallel
sys.path.append('.')
from fastreid.config import get_cfg
from fastreid.data import build_reid_test_loader, build_reid_train_loader
from fastreid.evaluation.testing import flatten_results_dict
from fastreid.engine import default_argument_parser, default_setup, launch
from fastreid.modeling import build_model
from fastreid.solver import build_lr_scheduler, build_optimizer
from fastreid.evaluation import inference_on_dataset, print_csv_format, ReidEvaluator
from fastreid.utils.checkpoint import Checkpointer, PeriodicCheckpointer
from fastreid.utils import comm
from fastreid.utils.events import (
CommonMetricPrinter,
EventStorage,
JSONWriter,
TensorboardXWriter
)
logger = logging.getLogger("fastreid")
def get_evaluator(cfg, dataset_name, output_dir=None):
    """Build the test data loader and a ReidEvaluator for `dataset_name`."""
    loader, num_query = build_reid_test_loader(cfg, dataset_name=dataset_name)
    evaluator = ReidEvaluator(cfg, num_query, output_dir)
    return loader, evaluator
def do_test(cfg, model):
    """Evaluate `model` on every dataset in cfg.DATASETS.TESTS.

    Returns:
        dict mapping dataset name -> metrics dict; when exactly one test set
        is configured, returns that set's metrics dict directly.
    """
    results = OrderedDict()
    # Idiom: dropped unused enumerate index.
    for dataset_name in cfg.DATASETS.TESTS:
        logger.info("Prepare testing set")
        try:
            data_loader, evaluator = get_evaluator(cfg, dataset_name)
        except NotImplementedError:
            # Bug fix: logger.warn is a deprecated alias for logger.warning.
            logger.warning(
                "No evaluator found. implement its `build_evaluator` method."
            )
            results[dataset_name] = {}
            continue
        results_i = inference_on_dataset(model, data_loader, evaluator, flip_test=cfg.TEST.FLIP.ENABLED)
        results[dataset_name] = results_i

        if comm.is_main_process():
            assert isinstance(
                results, dict
            ), "Evaluator must return a dict on the main process. Got {} instead.".format(
                results
            )
            logger.info("Evaluation results for {} in csv format:".format(dataset_name))
            results_i['dataset'] = dataset_name
            print_csv_format(results_i)

    if len(results) == 1:
        results = list(results.values())[0]
    return results
def do_train(cfg, model, resume=False):
    """Minimal epoch-based training loop (no DefaultTrainer hook system).

    Args:
        cfg: frozen config node.
        model: model to optimize; may be wrapped in DistributedDataParallel.
        resume: when True, restore optimizer/scheduler/epoch state from the
            last checkpoint instead of starting fresh.
    """
    data_loader = build_reid_train_loader(cfg)
    data_loader_iter = iter(data_loader)

    model.train()
    optimizer = build_optimizer(cfg, model)

    iters_per_epoch = len(data_loader.dataset) // cfg.SOLVER.IMS_PER_BATCH
    scheduler = build_lr_scheduler(cfg, optimizer, iters_per_epoch)

    # NOTE(review): `scheduler` is unpacked with **, so it is presumably a dict
    # holding "warmup_sched"/"lr_sched" (used below) that gets checkpointed
    # alongside the optimizer — confirm against build_lr_scheduler.
    checkpointer = Checkpointer(
        model,
        cfg.OUTPUT_DIR,
        save_to_disk=comm.is_main_process(),
        optimizer=optimizer,
        **scheduler
    )

    # Fresh start yields -1 + 1 == 0; otherwise resume after the saved epoch.
    start_epoch = (
        checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get("epoch", -1) + 1
    )
    iteration = start_iter = start_epoch * iters_per_epoch

    max_epoch = cfg.SOLVER.MAX_EPOCH
    max_iter = max_epoch * iters_per_epoch
    warmup_iters = cfg.SOLVER.WARMUP_ITERS
    delay_epochs = cfg.SOLVER.DELAY_EPOCHS

    periodic_checkpointer = PeriodicCheckpointer(checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_epoch)

    # Metric key the periodic checkpointer tracks; prefix with the first test
    # set's name when several test sets are configured.
    if len(cfg.DATASETS.TESTS) == 1:
        metric_name = "metric"
    else:
        metric_name = cfg.DATASETS.TESTS[0] + "/metric"

    # Only the main process writes console/JSON/tensorboard metrics.
    writers = (
        [
            CommonMetricPrinter(max_iter),
            JSONWriter(os.path.join(cfg.OUTPUT_DIR, "metrics.json")),
            TensorboardXWriter(cfg.OUTPUT_DIR)
        ]
        if comm.is_main_process()
        else []
    )

    # compared to "train_net.py", we do not support some hooks, such as
    # accurate timing, FP16 training and precise BN here,
    # because they are not trivial to implement in a small training loop
    logger.info("Start training from epoch {}".format(start_epoch))
    with EventStorage(start_iter) as storage:
        for epoch in range(start_epoch, max_epoch):
            storage.epoch = epoch
            for _ in range(iters_per_epoch):
                data = next(data_loader_iter)
                storage.iter = iteration

                loss_dict = model(data)
                losses = sum(loss_dict.values())
                assert torch.isfinite(losses).all(), loss_dict

                # Reduced losses are for logging only; backprop uses `losses`.
                loss_dict_reduced = {k: v.item() for k, v in comm.reduce_dict(loss_dict).items()}
                losses_reduced = sum(loss for loss in loss_dict_reduced.values())
                if comm.is_main_process():
                    storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)

                optimizer.zero_grad()
                losses.backward()
                optimizer.step()
                storage.put_scalar("lr", optimizer.param_groups[0]["lr"], smoothing_hint=False)

                # Flush writers every 200 iters, skipping the first few
                # iterations and epoch boundaries (flushed separately below).
                if iteration - start_iter > 5 and \
                        ((iteration + 1) % 200 == 0 or iteration == max_iter - 1) and \
                        ((iteration + 1) % iters_per_epoch != 0):
                    for writer in writers:
                        writer.write()

                iteration += 1

                # Per-iteration warmup schedule until warmup completes.
                if iteration <= warmup_iters:
                    scheduler["warmup_sched"].step()

            # Write metrics after each epoch
            for writer in writers:
                writer.write()
            # Per-epoch schedule kicks in only after warmup and the delay period.
            if iteration > warmup_iters and (epoch + 1) > delay_epochs:
                scheduler["lr_sched"].step()

            if (
                    cfg.TEST.EVAL_PERIOD > 0
                    and (epoch + 1) % cfg.TEST.EVAL_PERIOD == 0
                    and iteration != max_iter - 1
            ):
                results = do_test(cfg, model)
                # Compared to "train_net.py", the test results are not dumped to EventStorage
            else:
                results = {}

            flatten_results = flatten_results_dict(results)
            metric_dict = dict(metric=flatten_results[metric_name] if metric_name in flatten_results else -1)
            periodic_checkpointer.step(epoch, **metric_dict)
def setup(args):
    """
    Create configs and perform basic setups.

    Merges the YAML config file, then command-line overrides (which win),
    freezes the result, and runs the standard environment setup.
    """
    cfg = get_cfg()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    default_setup(cfg, args)
    return cfg
def main(args):
    """Per-process entry point: evaluate only, or train then evaluate."""
    config = setup(args)

    model = build_model(config)
    logger.info("Model:\n{}".format(model))

    # Evaluation-only path: load trained weights and test.
    if args.eval_only:
        config.defrost()
        config.MODEL.BACKBONE.PRETRAIN = False
        Checkpointer(model).load(config.MODEL.WEIGHTS)  # load trained model
        return do_test(config, model)

    if comm.get_world_size() > 1:
        model = DistributedDataParallel(
            model, device_ids=[comm.get_local_rank()], broadcast_buffers=False
        )

    do_train(config, model, resume=args.resume)
    return do_test(config, model)
if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    print("Command Line Args:", args)
    # Spawn one worker per GPU (and per machine); `main` runs in each worker.
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )
#!/usr/bin/env python
# encoding: utf-8
"""
@author: sherlock
@contact: sherlockliao01@gmail.com
"""
import sys
sys.path.append('.')
from fastreid.config import get_cfg
from fastreid.engine import DefaultTrainer, default_argument_parser, default_setup, launch
from fastreid.utils.checkpoint import Checkpointer
def setup(args):
    """
    Create configs and perform basic setups.

    Merges the YAML config file, then command-line overrides (which win),
    freezes the result, and runs the standard environment setup.
    """
    cfg = get_cfg()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    default_setup(cfg, args)
    return cfg
def main(args):
    """Per-process entry point: evaluate only, or train via DefaultTrainer."""
    config = setup(args)

    # Evaluation-only path: build the model, load trained weights, and test.
    if args.eval_only:
        config.defrost()
        config.MODEL.BACKBONE.PRETRAIN = False
        model = DefaultTrainer.build_model(config)

        Checkpointer(model).load(config.MODEL.WEIGHTS)  # load trained model

        return DefaultTrainer.test(config, model)

    trainer = DefaultTrainer(config)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()
if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    print("Command Line Args:", args)
    # Spawn one worker per GPU (and per machine); `main` runs in each worker.
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment