Commit e1aa783c authored by sugon_cxj

first commit

parent 2e9800bb
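'''EfficientNet in PyTorch.
See the paper "EfficientNet: Rethinking Model Scaling for Convolutional
Neural Networks" for more details.
'''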
import torch
import torch.nn as nn
import torch.nn.functional as F
import time
conv1_first_time = 0
bn1_first_time = 0
nl1_first_time = 0
conv1_time = 0
bn1_time = 0
nl1_time = 0
conv2_time = 0
bn2_time = 0
nl2_time = 0
conv3_time = 0
bn3_time = 0
se_avg_time = 0
se_linear1_time = 0
se_nl1_time = 0
se_linear2_time = 0
se_nl2_time = 0
se_mult_time = 0
conv2_last_time = 0
bn2_last_time = 0
nl2_last_time = 0
avg_pool_time = 0
linear_time = 0
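# NOTE: time.time() measures host wall-clock time, and CUDA kernels launch
# asynchronously, so on a GPU the intervals accumulated below mostly capture
# kernel-launch overhead. A minimal sketch of a synchronize-aware timer
# (a hypothetical helper, not wired into the models below):
def _timed(fn, *args):
    if torch.cuda.is_available():
        torch.cuda.synchronize()  # wait for pending kernels before timing
    start = time.time()
    out = fn(*args)
    if torch.cuda.is_available():
        torch.cuda.synchronize()  # wait for fn's kernels to finish
    return out, time.time() - start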
class Block(nn.Module):
def __init__(self, in_planes, exp_factor, out_planes, kernel_size, stride):
super(Block, self).__init__()
self.exp_size = in_planes * exp_factor
self.in_planes = in_planes
self.stride = stride
self.reduction_ratio = 4
# Expansion
self.conv1 = nn.Conv2d(in_planes, self.exp_size, kernel_size=1, stride=1, padding=0, bias=False)
self.bn1 = nn.BatchNorm2d(num_features=self.exp_size)
self.nl1 = nn.SiLU() # non-linearity
# Depthwise Convolution
self.conv2 = nn.Conv2d(self.exp_size, self.exp_size, kernel_size=kernel_size, stride=stride,
padding=(kernel_size - 1) // 2, groups=self.exp_size, bias=False)
self.bn2 = nn.BatchNorm2d(self.exp_size)
self.nl2 = nn.SiLU() # non-linearity
# Squeeze-and-Excite
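        # SE recalibrates channels: s = Sigmoid(W2 * SiLU(W1 * GAP(x))), and
        # the feature map is rescaled channel-wise by s in forward()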
self.se_avg_pool = nn.AdaptiveAvgPool2d(1)
self.se_linear1 = nn.Linear(self.exp_size, self.exp_size // self.reduction_ratio, bias=False)
self.se_nl1 = nn.SiLU()
self.se_linear2 = nn.Linear(self.exp_size // self.reduction_ratio, self.exp_size, bias=False)
self.se_nl2 = nn.Sigmoid()
# Linear Pointwise Convolution
self.conv3 = nn.Conv2d(self.exp_size, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn3 = nn.BatchNorm2d(out_planes)
self.shortcut = nn.Sequential()
if stride == 1 and in_planes != out_planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(out_planes),
)
    def forward(self, x):
global conv1_time, bn1_time, nl1_time, conv2_time, bn2_time, \
nl2_time, se_avg_time, se_linear1_time, se_nl1_time, \
se_linear2_time, se_nl2_time, se_mult_time, conv3_time, bn3_time
# Conv1
start = time.time()
out = self.conv1(x)
conv1_time += (time.time() - start)
start = time.time()
out = self.bn1(out)
bn1_time += (time.time() - start)
start = time.time()
out = self.nl1(out)
nl1_time += (time.time() - start)
        # Conv2
        start = time.time()
out = self.conv2(out)
conv2_time += (time.time() - start)
start = time.time()
out = self.bn2(out)
bn2_time += (time.time() - start)
start = time.time()
out = self.nl2(out)
nl2_time += (time.time() - start)
# SE
batch_size, channel_num, _, _ = out.size()
start = time.time()
out_se = self.se_avg_pool(out).view(batch_size, channel_num)
se_avg_time += (time.time() - start)
start = time.time()
out_se = self.se_linear1(out_se)
se_linear1_time += (time.time() - start)
start = time.time()
out_se = self.se_nl1(out_se)
se_nl1_time += (time.time() - start)
start = time.time()
out_se = self.se_linear2(out_se)
se_linear2_time += (time.time() - start)
start = time.time()
out_se = self.se_nl2(out_se)
se_nl2_time += (time.time() - start)
out_se = out_se.view(batch_size, channel_num, 1, 1)
start = time.time()
out = out * out_se
se_mult_time += (time.time() - start)
# Conv3
start = time.time()
out = self.conv3(out)
conv3_time += (time.time() - start)
start = time.time()
out = self.bn3(out)
bn3_time += (time.time() - start)
# Residual
out = out + self.shortcut(x) if self.stride == 1 else out
return out
class EfficientNet(nn.Module):
    def __init__(self, num_classes=10):
super(EfficientNet, self).__init__()
self.cfg = [
# expansion, out_planes, num_blocks, kernel_size, stride
[1, 16, 1, 3, 1], # NOTE: change stride 2 -> 1 for CIFAR10
[6, 24, 2, 3, 1],
[6, 40, 2, 5, 2],
[6, 80, 3, 3, 2],
[6, 112, 3, 5, 1],
[6, 192, 4, 5, 2],
[6, 320, 1, 3, 1] # NOTE: change stride 2 -> 1 for CIFAR10
]
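        # With conv1 at stride 1 and three stride-2 stages in cfg above, a
        # 32x32 CIFAR10 input reaches the head at 4x4, matching the
        # avg_pool2d(out, 4) in forward()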
self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(32)
self.nl1 = nn.SiLU() # non-linearity
in_planes = 32
# Block
layer = []
for expansion, out_planes, num_blocks, kernel, stride in self.cfg:
strides = [stride] + [1] * (num_blocks - 1)
for stride in strides:
                # Block(in_planes, exp_factor, out_planes, kernel_size, stride)
layer.append(Block(in_planes, expansion, out_planes, kernel, stride))
in_planes = out_planes
self.layers = nn.Sequential(*layer)
        self.conv2 = nn.Conv2d(in_planes, 1280, kernel_size=1, stride=1, padding=0, bias=False)  # in_planes == 320 after the loop
self.bn2 = nn.BatchNorm2d(1280)
self.nl2 = nn.SiLU()
self.linear = nn.Linear(1280, num_classes)
        self.mode = 1  # 1: return logits only; 2: also return per-layer timings
def change_mode(self):
self.mode = 2
def forward(self, x):
global conv1_first_time, bn1_first_time, nl1_first_time, conv1_time, \
bn1_time, nl1_time, conv2_time, bn2_time, nl2_time, se_avg_time, \
se_linear1_time, se_nl1_time, se_linear2_time, se_nl2_time, \
se_mult_time, conv3_time, bn3_time, conv2_last_time, bn2_last_time, \
nl2_last_time, avg_pool_time, linear_time
# first
start = time.time()
out = self.conv1(x)
conv1_first_time += (time.time() - start)
start = time.time()
out = self.bn1(out)
bn1_first_time += (time.time() - start)
start = time.time()
out = self.nl1(out)
nl1_first_time += (time.time() - start)
# blocks
out = self.layers(out)
# 1x1 Conv
start = time.time()
out = self.conv2(out)
conv2_last_time += (time.time() - start)
start = time.time()
out = self.bn2(out)
bn2_last_time += (time.time() - start)
start = time.time()
out = self.nl2(out)
nl2_last_time += (time.time() - start)
# NOTE: change pooling kernel_size 7 -> 4 for CIFAR10
start = time.time()
out = F.avg_pool2d(out, 4)
avg_pool_time += (time.time() - start)
out = out.view(out.size(0), -1)
# Linear
start = time.time()
out = self.linear(out)
linear_time += (time.time() - start)
# Pruning
if self.mode == 1:
return out
# Measurement
return out, conv1_first_time, bn1_first_time, nl1_first_time, conv1_time, \
bn1_time, nl1_time, conv2_time, bn2_time, nl2_time, se_avg_time, \
se_linear1_time, se_nl1_time, se_linear2_time, se_nl2_time, se_mult_time, \
conv3_time, bn3_time, conv2_last_time, bn2_last_time, nl2_last_time, \
avg_pool_time, linear_time
def test():
net = EfficientNet()
x = torch.randn(2, 3, 32, 32)
y = net(x)
print(y.size())
# test()
def efficientnet(num_classes=10):
return EfficientNet(num_classes=num_classes)
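# Minimal measurement-mode sketch (illustrative only): after change_mode(),
# forward() also returns the accumulated per-layer times.
# net = efficientnet()
# net.change_mode()
# out, *layer_times = net(torch.randn(2, 3, 32, 32))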
'''MobileNetV2 in PyTorch.
See the paper "Inverted Residuals and Linear Bottlenecks:
Mobile Networks for Classification, Detection and Segmentation" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import time
conv1_first_time = 0
bn1_first_time = 0
relu1_first_time = 0
conv1_time = 0
bn1_time = 0
relu1_time = 0
conv2_time = 0
bn2_time = 0
relu2_time = 0
conv3_time = 0
bn3_time = 0
conv2_last_time = 0
bn2_last_time = 0
relu2_last_time = 0
avg_pool_time = 0
linear_time = 0
class Block(nn.Module):
    '''expand + depthwise + pointwise'''
def __init__(self, in_planes, out_planes, expansion, stride):
super(Block, self).__init__()
self.stride = stride
planes = expansion * in_planes
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, groups=planes, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn3 = nn.BatchNorm2d(out_planes)
self.shortcut = nn.Sequential()
if stride == 1 and in_planes != out_planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(out_planes),
)
def forward(self, x):
global conv1_time, bn1_time, relu1_time, conv2_time, bn2_time, \
relu2_time, conv3_time, bn3_time
# Expand
start = time.time()
out = self.conv1(x)
conv1_time += (time.time() - start)
start = time.time()
out = self.bn1(out)
bn1_time += (time.time() - start)
start = time.time()
out = F.relu6(out)
relu1_time += (time.time() - start)
# Depthwise Conv
start = time.time()
out = self.conv2(out)
conv2_time += (time.time() - start)
start = time.time()
out = self.bn2(out)
bn2_time += (time.time() - start)
start = time.time()
out = F.relu6(out)
relu2_time += (time.time() - start)
# Pointwise Conv
start = time.time()
out = self.conv3(out)
conv3_time += (time.time() - start)
start = time.time()
out = self.bn3(out)
bn3_time += (time.time() - start)
# Residual
        # NOTE: shortcut time is not measured separately
        out = out + self.shortcut(x) if self.stride == 1 else out
return out
class MobileNetV2(nn.Module):
# (expansion, out_planes, num_blocks, stride)
cfg = [(1, 16, 1, 1),
(6, 24, 2, 1), # NOTE: change stride 2 -> 1 for CIFAR10
(6, 32, 3, 2),
(6, 64, 4, 2),
(6, 96, 3, 1),
(6, 160, 3, 2),
(6, 320, 1, 1)]
def __init__(self, num_classes=10):
super(MobileNetV2, self).__init__()
# NOTE: change conv1 stride 2 -> 1 for CIFAR10
self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(32)
self.layers = self._make_layers(in_planes=32)
self.conv2 = nn.Conv2d(320, 1280, kernel_size=1, stride=1, padding=0, bias=False)
self.bn2 = nn.BatchNorm2d(1280)
self.linear = nn.Linear(1280, num_classes)
        self.mode = 1  # 1: return logits only; 2: also return per-layer timings
def change_mode(self):
self.mode = 2
def _make_layers(self, in_planes):
layers = []
for expansion, out_planes, num_blocks, stride in self.cfg:
strides = [stride] + [1]*(num_blocks-1)
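            # e.g. cfg entry (6, 32, 3, 2) expands to strides [2, 1, 1]: the
            # first block downsamples, the rest keep the resolution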
for stride in strides:
layers.append(Block(in_planes, out_planes, expansion, stride))
in_planes = out_planes
return nn.Sequential(*layers)
def forward(self, x):
global conv1_first_time, bn1_first_time, relu1_first_time, conv1_time, \
bn1_time, relu1_time, conv2_time, bn2_time, relu2_time, conv3_time, bn3_time, \
conv2_last_time, bn2_last_time, relu2_last_time, avg_pool_time, linear_time
# first
start = time.time()
out = self.conv1(x)
conv1_first_time += (time.time() - start)
start = time.time()
out = self.bn1(out)
bn1_first_time += (time.time() - start)
start = time.time()
out = F.relu6(out)
relu1_first_time += (time.time() - start)
# blocks
out = self.layers(out)
# 1x1 Conv
start = time.time()
out = self.conv2(out)
conv2_last_time += (time.time() - start)
start = time.time()
out = self.bn2(out)
bn2_last_time += (time.time() - start)
start = time.time()
out = F.relu6(out)
relu2_last_time += (time.time() - start)
# Avg Pooling
# NOTE: change pooling kernel_size 7 -> 4 for CIFAR10
start = time.time()
out = F.avg_pool2d(out, 4)
avg_pool_time += (time.time() - start)
out = out.view(out.size(0), -1)
# Linear
start = time.time()
out = self.linear(out)
linear_time += (time.time() - start)
# Pruning
if self.mode == 1:
return out
# Measurement
return out, conv1_first_time, bn1_first_time, relu1_first_time, conv1_time, \
bn1_time, relu1_time, conv2_time, bn2_time, relu2_time, conv3_time, bn3_time, \
conv2_last_time, bn2_last_time, relu2_last_time, avg_pool_time, linear_time
def test():
net = MobileNetV2()
    x = torch.randn(2, 3, 32, 32)
y = net(x)
print(y.size())
# test()
def mobilenetv2(num_classes=10):
return MobileNetV2(num_classes=num_classes)
'''MobileNetV3 in PyTorch.
See the paper "Searching for MobileNetV3"
for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import time
conv1_first_time = 0
bn1_first_time = 0
nl1_first_time = 0
conv1_time = 0
bn1_time = 0
nl1_time = 0
conv2_time = 0
bn2_time = 0
nl2_time = 0
conv3_time = 0
bn3_time = 0
se_avg_time = 0
se_linear1_time = 0
se_nl1_time = 0
se_linear2_time = 0
se_nl2_time = 0
se_mult_time = 0
conv2_last_time = 0
bn2_last_time = 0
nl2_last_time = 0
avg_pool_time = 0
conv3_last_time = 0
nl3_last_time = 0
linear_time = 0
class H_sigmoid(nn.Module):
def __init__(self):
super(H_sigmoid, self).__init__()
def forward(self, x):
return F.relu6(x + 3) / 6
class H_swish(nn.Module):
def __init__(self):
super(H_swish, self).__init__()
def forward(self, x):
return x * F.relu6(x + 3) / 6
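# h_sigmoid(x) = ReLU6(x + 3) / 6 and h_swish(x) = x * h_sigmoid(x): the
# piecewise-linear approximations of sigmoid and swish from the MobileNetV3 paper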
class Block(nn.Module):
def __init__(self, in_planes, exp_size, out_planes, kernel_size, stride, use_SE, NL):
super(Block, self).__init__()
use_HS = NL == 'HS'
self.exp_size = exp_size
self.in_planes = in_planes
self.stride = stride
self.reduction_ratio = 4
self.use_SE = use_SE
# Expansion
self.conv1 = nn.Conv2d(in_planes, exp_size, kernel_size=1, stride=1, padding=0, bias=False)
self.bn1 = nn.BatchNorm2d(num_features=exp_size)
self.nl1 = nn.ReLU() # non-linearity
if use_HS:
self.nl1 = H_swish()
# Depthwise Convolution
self.conv2 = nn.Conv2d(exp_size, exp_size, kernel_size=kernel_size, stride=stride,
padding=(kernel_size - 1) // 2, groups=exp_size, bias=False)
self.bn2 = nn.BatchNorm2d(exp_size)
self.nl2 = nn.ReLU() # non-linearity
if use_HS:
self.nl2 = H_swish()
# Squeeze-and-Excite
if use_SE:
self.se_avg_pool = nn.AdaptiveAvgPool2d(1)
self.se_linear1 = nn.Linear(exp_size, exp_size // self.reduction_ratio, bias=False)
self.se_nl1 = nn.ReLU()
self.se_linear2 = nn.Linear(exp_size // self.reduction_ratio, exp_size, bias=False)
self.se_nl2 = H_sigmoid()
# Linear Pointwise Convolution
self.conv3 = nn.Conv2d(exp_size, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn3 = nn.BatchNorm2d(out_planes)
self.shortcut = nn.Sequential()
if stride == 1 and in_planes != out_planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(out_planes),
)
    def forward(self, x):
global conv1_time, bn1_time, nl1_time, conv2_time, bn2_time, \
nl2_time, se_avg_time, se_linear1_time, se_nl1_time, \
se_linear2_time, se_nl2_time, se_mult_time, conv3_time, bn3_time
# Conv1
start = time.time()
out = self.conv1(x)
conv1_time += (time.time() - start)
start = time.time()
out = self.bn1(out)
bn1_time += (time.time() - start)
start = time.time()
out = self.nl1(out)
nl1_time += (time.time() - start)
# Conv2
start = time.time()
out = self.conv2(out)
conv2_time += (time.time() - start)
start = time.time()
out = self.bn2(out)
bn2_time += (time.time() - start)
start = time.time()
out = self.nl2(out)
nl2_time += (time.time() - start)
# SE
if self.use_SE:
batch_size, channel_num, _, _ = out.size()
start = time.time()
out_se = self.se_avg_pool(out).view(batch_size, channel_num)
se_avg_time += (time.time() - start)
start = time.time()
out_se = self.se_linear1(out_se)
se_linear1_time += (time.time() - start)
start = time.time()
out_se = self.se_nl1(out_se)
se_nl1_time += (time.time() - start)
start = time.time()
out_se = self.se_linear2(out_se)
se_linear2_time += (time.time() - start)
start = time.time()
out_se = self.se_nl2(out_se)
se_nl2_time += (time.time() - start)
out_se = out_se.view(batch_size, channel_num, 1, 1)
start = time.time()
            out = out * out_se
se_mult_time += (time.time() - start)
# Conv3
start = time.time()
out = self.conv3(out)
conv3_time += (time.time() - start)
start = time.time()
out = self.bn3(out)
bn3_time += (time.time() - start)
# Residual
out = out + self.shortcut(x) if self.stride == 1 else out
return out
class MobileNetV3(nn.Module):
def __init__(self, case='small', num_classes=10):
super(MobileNetV3, self).__init__()
if case == 'large': # MobileNetV3-Large
self.cfg = [
# kernel_size, expansion, out_planes, SE, NL, stride
[3, 16, 16, False, 'RE', 1],
[3, 64, 24, False, 'RE', 1], # NOTE: change stride 2 -> 1 for CIFAR10
[3, 72, 24, False, 'RE', 1],
[5, 72, 40, True, 'RE', 2],
[5, 120, 40, True, 'RE', 1],
[5, 120, 40, True, 'RE', 1],
[3, 240, 80, False, 'HS', 2],
[3, 200, 80, False, 'HS', 1],
[3, 184, 80, False, 'HS', 1],
[3, 184, 80, False, 'HS', 1],
[3, 480, 112, True, 'HS', 1],
[3, 672, 112, True, 'HS', 1],
[5, 672, 160, True, 'HS', 2],
[5, 960, 160, True, 'HS', 1],
[5, 960, 160, True, 'HS', 1]
]
elif case == 'small': # MobileNetV3-Small
self.cfg = [
# kernel_size, expansion, out_planes, use_SE, NL, stride
[3, 16, 16, True, 'RE', 1], # NOTE: change stride 2 -> 1 for CIFAR10
[3, 72, 24, False, 'RE', 2],
[3, 88, 24, False, 'RE', 1],
[5, 96, 40, True, 'HS', 2],
[5, 240, 40, True, 'HS', 1],
[5, 240, 40, True, 'HS', 1],
[5, 120, 48, True, 'HS', 1],
[5, 144, 48, True, 'HS', 1],
[5, 288, 96, True, 'HS', 2],
[5, 576, 96, True, 'HS', 1],
[5, 576, 96, True, 'HS', 1]
            ]
        else:
            raise ValueError("case must be 'large' or 'small', got %r" % case)
last_channels_num = 1280 if case == 'large' else 1024
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.nl1 = H_swish() # non-linearity
self.layer = []
in_planes = 16
# Blocks
for kernel_size, exp_size, out_planes, use_SE, NL, stride in self.cfg:
self.layer.append(Block(in_planes, exp_size, out_planes, kernel_size, stride, use_SE, NL))
in_planes = out_planes
self.layers = nn.Sequential(*self.layer)
        out_planes = exp_size  # exp_size of the last cfg entry: 960 ('large') / 576 ('small')
self.conv2 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn2 = nn.BatchNorm2d(out_planes)
self.nl2 = H_swish()
self.conv3 = nn.Conv2d(out_planes, last_channels_num, kernel_size=1, stride=1, padding=0, bias=False)
self.nl3 = H_swish()
self.linear = nn.Linear(last_channels_num, num_classes)
        self.mode = 1  # 1: return logits only; 2: also return per-layer timings
def change_mode(self):
self.mode = 2
def forward(self, x):
global conv1_first_time, bn1_first_time, nl1_first_time, conv1_time, \
bn1_time, nl1_time, conv2_time, bn2_time, nl2_time, se_avg_time, \
se_linear1_time, se_nl1_time, se_linear2_time, se_nl2_time, \
se_mult_time, conv3_time, bn3_time, conv2_last_time, bn2_last_time, \
nl2_last_time, avg_pool_time, conv3_last_time, nl3_last_time, linear_time
# first
start = time.time()
out = self.conv1(x)
conv1_first_time += (time.time() - start)
start = time.time()
out = self.bn1(out)
bn1_first_time += (time.time() - start)
start = time.time()
out = self.nl1(out)
nl1_first_time += (time.time() - start)
# blocks
out = self.layers(out)
# 1x1 Conv
start = time.time()
out = self.conv2(out)
conv2_last_time += (time.time() - start)
start = time.time()
out = self.bn2(out)
bn2_last_time += (time.time() - start)
start = time.time()
out = self.nl2(out)
nl2_last_time += (time.time() - start)
# Avg Pooling
# NOTE: change pooling kernel_size 7 -> 4 for CIFAR10
start = time.time()
out = F.avg_pool2d(out, 4)
avg_pool_time += (time.time() - start)
# 1x1 Conv
start = time.time()
out = self.conv3(out)
conv3_last_time += (time.time() - start)
start = time.time()
out = self.nl3(out)
nl3_last_time += (time.time() - start)
out = out.view(out.size(0), -1)
# Linear
start = time.time()
out = self.linear(out)
linear_time += (time.time() - start)
# Pruning
if self.mode == 1:
return out
# Measurement
return out, conv1_first_time, bn1_first_time, nl1_first_time, conv1_time, \
bn1_time, nl1_time, conv2_time, bn2_time, nl2_time, se_avg_time, \
se_linear1_time, se_nl1_time, se_linear2_time, se_nl2_time, se_mult_time, \
conv3_time, bn3_time, conv2_last_time, bn2_last_time, nl2_last_time, \
avg_pool_time, conv3_last_time, nl3_last_time, linear_time
def test():
net = MobileNetV3()
x = torch.randn(2, 3, 32, 32)
y = net(x)
print(y.size())
# test()
def mobilenetv3(num_classes=10):
return MobileNetV3(num_classes=num_classes)
from . import evaluator, metrics, utils
from .utils import get_logger, MagnitudeRecover
from tqdm import tqdm
import torch.nn.functional as F
import torch
from . import metrics
class Evaluator(object):
def __init__(self, metric, dataloader):
self.dataloader = dataloader
self.metric = metric
    def eval(self, model, device=None, progress=False):
        self.metric.reset()
        if device is None:
            device = next(model.parameters()).device  # honor the device argument instead of hard-coding .cuda()
        with torch.no_grad():
            for inputs, targets in tqdm(self.dataloader, disable=not progress):
                inputs, targets = inputs.to(device), targets.to(device)
                outputs = model(inputs)
                self.metric.update(outputs, targets)
        return self.metric.get_results()
def __call__(self, *args, **kwargs):
return self.eval(*args, **kwargs)
def classification_evaluator(dataloader):
metric = metrics.MetricCompose({
'Acc': metrics.TopkAccuracy(),
'Loss': metrics.RunningLoss(torch.nn.CrossEntropyLoss(reduction='sum'))
})
    return Evaluator(metric, dataloader=dataloader)
def segmentation_evaluator(dataloader, num_classes, ignore_idx=255):
cm = metrics.ConfusionMatrix(num_classes, ignore_idx=ignore_idx)
metric = metrics.MetricCompose({
'mIoU': metrics.mIoU(cm),
'Acc': metrics.Accuracy(),
'Loss': metrics.RunningLoss(torch.nn.CrossEntropyLoss(reduction='sum'))
})
    return Evaluator(metric, dataloader=dataloader)
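# Minimal usage sketch (assumes an existing test DataLoader; the names below
# are illustrative):
# evaluator = classification_evaluator(test_loader)
# results = evaluator(model, progress=True)
# print(results['Acc'], results['Loss'])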