Commit 2116189f authored by Yuge Zhang, committed by Chi Song

NAS refactor initialization (#1676)

parent 76086583
from torchvision import transforms
from torchvision.datasets import CIFAR10


def get_dataset(cls):
    MEAN = [0.49139968, 0.48215827, 0.44653124]
    STD = [0.24703233, 0.24348505, 0.26158768]
    transf = [
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip()
    ]
    normalize = [
        transforms.ToTensor(),
        transforms.Normalize(MEAN, STD)
    ]
    train_transform = transforms.Compose(transf + normalize)
    valid_transform = transforms.Compose(normalize)

    if cls == "cifar10":
        dataset_train = CIFAR10(root="./data", train=True, download=True, transform=train_transform)
        dataset_valid = CIFAR10(root="./data", train=False, download=True, transform=valid_transform)
    else:
        raise NotImplementedError
    return dataset_train, dataset_valid
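

# Example usage (illustrative sketch; batch size and worker count are arbitrary placeholders):
# wrap the two splits returned above in standard DataLoaders.
if __name__ == "__main__":
    from torch.utils.data import DataLoader

    train_set, valid_set = get_dataset("cifar10")
    train_loader = DataLoader(train_set, batch_size=64, shuffle=True, num_workers=4)
    valid_loader = DataLoader(valid_set, batch_size=64, shuffle=False, num_workers=4)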
import torch
import torch.nn as nn

PRIMITIVES = [
    'max_pool_3x3',
    'avg_pool_3x3',
    'skip_connect',  # identity
    'sep_conv_3x3',
    'sep_conv_5x5',
    'dil_conv_3x3',
    'dil_conv_5x5',
    'none'
]

OPS = {
    'none': lambda C, stride, affine: Zero(stride),
    'avg_pool_3x3': lambda C, stride, affine: PoolBN('avg', C, 3, stride, 1, affine=affine),
    'max_pool_3x3': lambda C, stride, affine: PoolBN('max', C, 3, stride, 1, affine=affine),
    'skip_connect': lambda C, stride, affine: \
        Identity() if stride == 1 else FactorizedReduce(C, C, affine=affine),
    'sep_conv_3x3': lambda C, stride, affine: SepConv(C, C, 3, stride, 1, affine=affine),
    'sep_conv_5x5': lambda C, stride, affine: SepConv(C, C, 5, stride, 2, affine=affine),
    'sep_conv_7x7': lambda C, stride, affine: SepConv(C, C, 7, stride, 3, affine=affine),
    'dil_conv_3x3': lambda C, stride, affine: DilConv(C, C, 3, stride, 2, 2, affine=affine),  # 5x5
    'dil_conv_5x5': lambda C, stride, affine: DilConv(C, C, 5, stride, 4, 2, affine=affine),  # 9x9
    'conv_7x1_1x7': lambda C, stride, affine: FacConv(C, C, 7, stride, 3, affine=affine)
}


def drop_path_(x, drop_prob, training):
    if training and drop_prob > 0.:
        keep_prob = 1. - drop_prob
        # per-sample mask; assumes x is on a CUDA device.
        mask = torch.cuda.FloatTensor(x.size(0), 1, 1, 1).bernoulli_(keep_prob)
        x.div_(keep_prob).mul_(mask)
    return x


class DropPath_(nn.Module):
    def __init__(self, p=0.):
        """ [!] DropPath is an in-place module.

        Args:
            p: probability of a path being zeroed.
        """
        super().__init__()
        self.p = p

    def extra_repr(self):
        return 'p={}, inplace'.format(self.p)

    def forward(self, x):
        drop_path_(x, self.p, self.training)
        return x


class PoolBN(nn.Module):
    """
    AvgPool or MaxPool - BN
    """

    def __init__(self, pool_type, C, kernel_size, stride, padding, affine=True):
        """
        Args:
            pool_type: 'max' or 'avg'
        """
        super().__init__()
        if pool_type.lower() == 'max':
            self.pool = nn.MaxPool2d(kernel_size, stride, padding)
        elif pool_type.lower() == 'avg':
            self.pool = nn.AvgPool2d(kernel_size, stride, padding, count_include_pad=False)
        else:
            raise ValueError()
        self.bn = nn.BatchNorm2d(C, affine=affine)

    def forward(self, x):
        out = self.pool(x)
        out = self.bn(out)
        return out


class StdConv(nn.Module):
    """ Standard conv
    ReLU - Conv - BN
    """

    def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
        super().__init__()
        self.net = nn.Sequential(
            nn.ReLU(),
            nn.Conv2d(C_in, C_out, kernel_size, stride, padding, bias=False),
            nn.BatchNorm2d(C_out, affine=affine)
        )

    def forward(self, x):
        return self.net(x)


class FacConv(nn.Module):
    """ Factorized conv
    ReLU - Conv(Kx1) - Conv(1xK) - BN
    """

    def __init__(self, C_in, C_out, kernel_length, stride, padding, affine=True):
        super().__init__()
        self.net = nn.Sequential(
            nn.ReLU(),
            nn.Conv2d(C_in, C_in, (kernel_length, 1), stride, padding, bias=False),
            nn.Conv2d(C_in, C_out, (1, kernel_length), stride, padding, bias=False),
            nn.BatchNorm2d(C_out, affine=affine)
        )

    def forward(self, x):
        return self.net(x)


class DilConv(nn.Module):
    """ (Dilated) depthwise separable conv
    ReLU - (Dilated) depthwise separable - Pointwise - BN

    If dilation == 2, 3x3 conv => 5x5 receptive field
                      5x5 conv => 9x9 receptive field
    """

    def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, affine=True):
        super().__init__()
        self.net = nn.Sequential(
            nn.ReLU(),
            nn.Conv2d(C_in, C_in, kernel_size, stride, padding, dilation=dilation, groups=C_in,
                      bias=False),
            nn.Conv2d(C_in, C_out, 1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(C_out, affine=affine)
        )

    def forward(self, x):
        return self.net(x)


class SepConv(nn.Module):
    """ Depthwise separable conv
    DilConv(dilation=1) * 2
    """

    def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
        super().__init__()
        self.net = nn.Sequential(
            DilConv(C_in, C_in, kernel_size, stride, padding, dilation=1, affine=affine),
            DilConv(C_in, C_out, kernel_size, 1, padding, dilation=1, affine=affine)
        )

    def forward(self, x):
        return self.net(x)


class Identity(nn.Module):
    def forward(self, x):
        return x


class Zero(nn.Module):
    def __init__(self, stride):
        super().__init__()
        self.stride = stride

    def forward(self, x):
        if self.stride == 1:
            return x * 0.
        # re-sizing by stride
        return x[:, :, ::self.stride, ::self.stride] * 0.


class FactorizedReduce(nn.Module):
    """
    Reduce feature map size by factorized pointwise (stride=2).
    """

    def __init__(self, C_in, C_out, affine=True):
        super().__init__()
        self.relu = nn.ReLU()
        self.conv1 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False)
        self.conv2 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False)
        self.bn = nn.BatchNorm2d(C_out, affine=affine)

    def forward(self, x):
        x = self.relu(x)
        out = torch.cat([self.conv1(x), self.conv2(x[:, :, 1:, 1:])], dim=1)
        out = self.bn(out)
        return out
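

# Illustrative sketch: build candidate ops from OPS and sanity-check output shapes on a
# dummy input. The channel count, spatial size and stride below are arbitrary placeholders.
if __name__ == "__main__":
    x = torch.randn(2, 16, 32, 32)
    sep = OPS['sep_conv_3x3'](16, 1, False)
    assert sep(x).shape == (2, 16, 32, 32)        # stride 1 keeps the spatial size
    red = FactorizedReduce(16, 16, affine=False)
    assert red(x).shape == (2, 16, 16, 16)        # the stride-2 branches halve H and W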
from argparse import ArgumentParser
import datasets
import image_ops as ops
import nni.nas.pytorch as nas
import torch
import torch.nn as nn
from nni.nas.pytorch.darts import DartsTrainer


class SearchCell(nn.Module):
    """
    Cell for search.
    """

    def __init__(self, n_nodes, channels_pp, channels_p, channels, reduction_p, reduction):
        """
        Initialize a search cell.

        Parameters
        ----------
        n_nodes: int
            Number of nodes in the current DAG.
        channels_pp: int
            Number of output channels from the previous previous cell.
        channels_p: int
            Number of output channels from the previous cell.
        channels: int
            Number of channels that will be used in the current DAG.
        reduction_p: bool
            Flag for whether the previous cell is a reduction cell or not.
        reduction: bool
            Flag for whether the current cell is a reduction cell or not.
        """
        super().__init__()
        self.reduction = reduction
        self.n_nodes = n_nodes

        # If the previous cell is a reduction cell, the current input size does not match the
        # output size of cell[k-2]. So the output of cell[k-2] should be reduced by preprocessing.
        if reduction_p:
            self.preproc0 = ops.FactorizedReduce(channels_pp, channels, affine=False)
        else:
            self.preproc0 = ops.StdConv(channels_pp, channels, 1, 1, 0, affine=False)
        self.preproc1 = ops.StdConv(channels_p, channels, 1, 1, 0, affine=False)

        # generate dag
        self.mutable_ops = nn.ModuleList()
        for depth in range(self.n_nodes):
            self.mutable_ops.append(nn.ModuleList())
            for i in range(2 + depth):  # include 2 input nodes
                # reduction should be used only for input nodes
                stride = 2 if reduction and i < 2 else 1
                op = nas.mutables.LayerChoice([ops.PoolBN('max', channels, 3, stride, 1, affine=False),
                                               ops.PoolBN('avg', channels, 3, stride, 1, affine=False),
                                               ops.Identity() if stride == 1 else
                                               ops.FactorizedReduce(channels, channels, affine=False),
                                               ops.SepConv(channels, channels, 3, stride, 1, affine=False),
                                               ops.SepConv(channels, channels, 5, stride, 2, affine=False),
                                               ops.DilConv(channels, channels, 3, stride, 2, 2, affine=False),
                                               ops.DilConv(channels, channels, 5, stride, 4, 2, affine=False),
                                               ops.Zero(stride)],
                                              key="r{}_d{}_i{}".format(reduction, depth, i))
                self.mutable_ops[depth].append(op)

    def forward(self, s0, s1):
        # s0, s1 are the outputs of the previous previous cell and the previous cell, respectively.
        tensors = [self.preproc0(s0), self.preproc1(s1)]
        for ops in self.mutable_ops:
            assert len(ops) == len(tensors)
            cur_tensor = sum(op(tensor) for op, tensor in zip(ops, tensors))
            tensors.append(cur_tensor)
        output = torch.cat(tensors[2:], dim=1)
        return output


class SearchCNN(nn.Module):
    """
    Search CNN model
    """

    def __init__(self, in_channels, channels, n_classes, n_layers, n_nodes=4, stem_multiplier=3):
        """
        Initialize a search CNN.

        Parameters
        ----------
        in_channels: int
            Number of channels in images.
        channels: int
            Number of channels used in the network.
        n_classes: int
            Number of classes.
        n_layers: int
            Number of cells in the whole network.
        n_nodes: int
            Number of nodes in a cell.
        stem_multiplier: int
            Multiplier of channels in STEM.
        """
        super().__init__()
        self.in_channels = in_channels
        self.channels = channels
        self.n_classes = n_classes
        self.n_layers = n_layers

        c_cur = stem_multiplier * self.channels
        self.stem = nn.Sequential(
            nn.Conv2d(in_channels, c_cur, 3, 1, 1, bias=False),
            nn.BatchNorm2d(c_cur)
        )

        # for the first cell, stem is used for both s0 and s1
        # [!] channels_pp and channels_p are output channel sizes, but c_cur is the input channel size.
        channels_pp, channels_p, c_cur = c_cur, c_cur, channels

        self.cells = nn.ModuleList()
        reduction_p, reduction = False, False
        for i in range(n_layers):
            reduction_p, reduction = reduction, False
            # Reduce feature map size and double channels at 1/3 and 2/3 of the total depth.
            if i in [n_layers // 3, 2 * n_layers // 3]:
                c_cur *= 2
                reduction = True

            cell = SearchCell(n_nodes, channels_pp, channels_p, c_cur, reduction_p, reduction)
            self.cells.append(cell)
            c_cur_out = c_cur * n_nodes
            channels_pp, channels_p = channels_p, c_cur_out

        self.gap = nn.AdaptiveAvgPool2d(1)
        self.linear = nn.Linear(channels_p, n_classes)

    def forward(self, x):
        s0 = s1 = self.stem(x)
        for cell in self.cells:
            s0, s1 = s1, cell(s0, s1)
        out = self.gap(s1)
        out = out.view(out.size(0), -1)  # flatten
        logits = self.linear(out)
        return logits
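

# Illustrative helper (hypothetical values, for inspection only): with the arguments below,
# reduction cells fall at indices n_layers // 3 and 2 * n_layers // 3, where the feature map
# is halved and the channel count doubled.
def _inspect_search_cnn():
    net = SearchCNN(in_channels=3, channels=16, n_classes=10, n_layers=4, n_nodes=2)
    return [cell.reduction for cell in net.cells]   # [False, True, True, False]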


def accuracy(output, target, topk=(1,)):
    """ Computes the precision@k for the specified values of k """
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()

    # one-hot case
    if target.ndimension() > 1:
        target = target.max(1)[1]

    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = dict()
    for k in topk:
        correct_k = correct[:k].view(-1).float().sum(0)
        res["acc{}".format(k)] = correct_k.mul_(1.0 / batch_size).item()
    return res
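

# Illustrative sketch: accuracy() on a tiny random batch returns a dict keyed "acc1", "acc5", ...
# for the requested values of k; the numbers in the comment are only examples.
def _accuracy_example():
    logits = torch.randn(8, 10)
    labels = torch.randint(0, 10, (8,))
    return accuracy(logits, labels, topk=(1, 5))    # e.g. {"acc1": 0.125, "acc5": 0.5}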


if __name__ == "__main__":
    parser = ArgumentParser("darts")
    parser.add_argument("--layers", default=4, type=int)
    parser.add_argument("--nodes", default=2, type=int)
    parser.add_argument("--batch-size", default=3, type=int)
    parser.add_argument("--log-frequency", default=1, type=int)
    args = parser.parse_args()

    dataset_train, dataset_valid = datasets.get_dataset("cifar10")
    model = SearchCNN(3, 16, 10, args.layers, n_nodes=args.nodes)
    criterion = nn.CrossEntropyLoss()

    optim = torch.optim.SGD(model.parameters(), 0.025, momentum=0.9, weight_decay=3.0E-4)
    n_epochs = 50
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, n_epochs, eta_min=0.001)

    trainer = DartsTrainer(model,
                           loss=criterion,
                           metrics=lambda output, target: accuracy(output, target, topk=(1,)),
                           model_optim=optim,
                           lr_scheduler=lr_scheduler,
                           num_epochs=n_epochs,
                           dataset_train=dataset_train,
                           dataset_valid=dataset_valid,
                           batch_size=args.batch_size,
                           log_frequency=args.log_frequency)
    trainer.train()
    trainer.finalize()

    # augment step
    # ...
from .mutator import DartsMutator
from .trainer import DartsTrainer
import torch
from torch import nn as nn
from torch.nn import functional as F
from nni.nas.pytorch.mutables import LayerChoice
from nni.nas.pytorch.mutator import PyTorchMutator


class DartsMutator(PyTorchMutator):
    def before_build(self, model):
        self.choices = nn.ParameterDict()

    def on_init_layer_choice(self, mutable: LayerChoice):
        self.choices[mutable.key] = nn.Parameter(1.0E-3 * torch.randn(mutable.length))

    def on_forward_layer_choice(self, mutable: LayerChoice, ops, *inputs):
        weights = F.softmax(self.choices[mutable.key], dim=-1)
        return sum(w * op(*inputs) for w, op in zip(weights, ops))
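

# Illustrative sketch (standalone tensors, arbitrary sizes): the mixed operation in
# on_forward_layer_choice above is a softmax-weighted sum of the candidate outputs.
def _mixed_op_example():
    alpha = 1.0E-3 * torch.randn(3)                     # architecture parameters for 3 candidates
    weights = F.softmax(alpha, dim=-1)                  # continuous relaxation: weights sum to 1
    candidate_outputs = [torch.randn(2, 4) for _ in range(3)]
    return sum(w * o for w, o in zip(weights, candidate_outputs))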
import copy
import torch
from torch import nn as nn
from nni.nas.pytorch.trainer import Trainer
from nni.nas.utils import AverageMeterGroup, auto_device
from .mutator import DartsMutator


class DartsTrainer(Trainer):
    def __init__(self, model, loss, metrics,
                 model_optim, lr_scheduler, num_epochs, dataset_train, dataset_valid,
                 mutator=None, batch_size=64, workers=4, device=None, log_frequency=None):
        self.model = model
        self.loss = loss
        self.metrics = metrics
        self.mutator = mutator
        if self.mutator is None:
            self.mutator = DartsMutator(model)
        self.model_optim = model_optim
        self.lr_scheduler = lr_scheduler
        self.num_epochs = num_epochs
        self.dataset_train = dataset_train
        self.dataset_valid = dataset_valid
        self.device = auto_device() if device is None else device
        self.log_frequency = log_frequency

        self.model.to(self.device)
        self.loss.to(self.device)
        self.mutator.to(self.device)

        self.ctrl_optim = torch.optim.Adam(self.mutator.parameters(), 3.0E-4, betas=(0.5, 0.999),
                                           weight_decay=1.0E-3)
        n_train = len(self.dataset_train)
        split = n_train // 2
        indices = list(range(n_train))
        train_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[:split])
        valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[split:])
        self.train_loader = torch.utils.data.DataLoader(self.dataset_train,
                                                        batch_size=batch_size,
                                                        sampler=train_sampler,
                                                        num_workers=workers)
        self.valid_loader = torch.utils.data.DataLoader(self.dataset_train,
                                                        batch_size=batch_size,
                                                        sampler=valid_sampler,
                                                        num_workers=workers)

    def train_epoch(self, epoch):
        self.model.train()
        self.mutator.train()
        lr = self.lr_scheduler.get_lr()[0]
        meters = AverageMeterGroup()
        for step, ((trn_X, trn_y), (val_X, val_y)) in enumerate(zip(self.train_loader, self.valid_loader)):
            trn_X, trn_y = trn_X.to(self.device), trn_y.to(self.device)
            val_X, val_y = val_X.to(self.device), val_y.to(self.device)

            # back up the model weights for the hessian computation
            # (cannot deepcopy the model itself because that would break the mutator's references)
            backup_model = copy.deepcopy(self.model.state_dict())

            # phase 1. child network step
            self.model_optim.zero_grad()
            logits = self.model(trn_X)
            loss = self.loss(logits, trn_y)
            loss.backward()
            # gradient clipping
            nn.utils.clip_grad_norm_(self.model.parameters(), 5.)
            self.model_optim.step()
            new_model = copy.deepcopy(self.model.state_dict())

            # phase 2. architect step (alpha)
            self.ctrl_optim.zero_grad()
            # compute unrolled loss
            self._unrolled_backward(trn_X, trn_y, val_X, val_y, backup_model, lr)
            self.ctrl_optim.step()
            self.model.load_state_dict(new_model)

            metrics = self.metrics(logits, trn_y)
            metrics["loss"] = loss.item()
            meters.update(metrics)
            if self.log_frequency is not None and step % self.log_frequency == 0:
                print("Epoch {} Step [{}/{}] {}".format(epoch, step, len(self.train_loader), meters))
        self.lr_scheduler.step()

    def validate_epoch(self, epoch):
        self.model.eval()
        self.mutator.eval()
        meters = AverageMeterGroup()
        with torch.no_grad():
            for step, (X, y) in enumerate(self.valid_loader):
                X, y = X.to(self.device), y.to(self.device)
                logits = self.model(X)
                metrics = self.metrics(logits, y)
                meters.update(metrics)
                if self.log_frequency is not None and step % self.log_frequency == 0:
                    print("Epoch {} Step [{}/{}] {}".format(epoch, step, len(self.valid_loader), meters))

    def train(self):
        for epoch in range(self.num_epochs):
            # training
            print("Epoch {} Training".format(epoch))
            self.train_epoch(epoch)

            # validation
            print("Epoch {} Validating".format(epoch))
            self.validate_epoch(epoch)

    def _unrolled_backward(self, trn_X, trn_y, val_X, val_y, backup_model, lr):
        """
        Compute the unrolled loss and backward its gradients.

        Parameters
        ----------
        backup_model: dict
            State dict of the model backed up before this step.
        lr: float
            Learning rate for the virtual gradient step (same as the net's lr).
        """
        loss = self.loss(self.model(val_X), val_y)
        w_model = tuple(self.model.parameters())
        w_ctrl = tuple(self.mutator.parameters())
        w_grads = torch.autograd.grad(loss, w_model + w_ctrl)
        d_model = w_grads[:len(w_model)]
        d_ctrl = w_grads[len(w_model):]

        hessian = self._compute_hessian(backup_model, d_model, trn_X, trn_y)
        with torch.no_grad():
            for param, d, h in zip(w_ctrl, d_ctrl, hessian):
                param.grad = d - lr * h

    def _compute_hessian(self, model, dw, trn_X, trn_y):
        """
        dw = dw` { L_val(w`, alpha) }
        w+ = w + eps * dw
        w- = w - eps * dw
        hessian = (dalpha { L_trn(w+, alpha) } - dalpha { L_trn(w-, alpha) }) / (2*eps)
        eps = 0.01 / ||dw||
        """
        self.model.load_state_dict(model)
        norm = torch.cat([w.view(-1) for w in dw]).norm()
        eps = 0.01 / norm

        for e in [eps, -2. * eps]:
            # w+ = w + eps*dw`, w- = w - eps*dw`
            with torch.no_grad():
                for p, d in zip(self.model.parameters(), dw):
                    p += e * d

            loss = self.loss(self.model(trn_X), trn_y)  # TODO: should use model instead of self.model
            if e > 0:
                dalpha_pos = torch.autograd.grad(loss, self.mutator.parameters())  # dalpha { L_trn(w+) }
            elif e < 0:
                dalpha_neg = torch.autograd.grad(loss, self.mutator.parameters())  # dalpha { L_trn(w-) }

        hessian = [(p - n) / (2. * eps) for p, n in zip(dalpha_pos, dalpha_neg)]
        return hessian

    def finalize(self):
        pass
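

# Illustrative sketch (hypothetical scalar function, not tied to the trainer): _compute_hessian
# relies on the same central finite difference shown here, evaluating at w + eps * dw and
# w - eps * dw and dividing the difference by 2 * eps.
def _central_difference_example():
    f = lambda w: (w ** 3).sum()
    w = torch.tensor([1.0, 2.0])
    dw = torch.tensor([0.5, -0.5])
    eps = 0.01 / dw.norm()
    return (f(w + eps * dw) - f(w - eps * dw)) / (2. * eps)   # ~ directional derivative of f along dw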
import torch.nn as nn
from nni.nas.utils import global_mutable_counting


class PyTorchMutable(nn.Module):
    """
    Mutable is designed to function as a normal layer, with all necessary operators' weights.
    States and weights of architectures should be included in the mutator, instead of in the layer itself.

    Mutable has a key, which marks the identity of the mutable. This key can be used by users to share
    decisions among different mutables. In a mutator's implementation, mutators should use the key to
    distinguish different mutables. Mutables that share the same key should be "similar" to each other.

    Currently the default scope for keys is global.
    """

    def __init__(self, key=None):
        super().__init__()
        if key is not None:
            self.key = key
        else:
            self.key = self.__class__.__name__ + str(global_mutable_counting())
        self.name = self.key

    def __deepcopy__(self, memodict=None):
        raise NotImplementedError

    def set_mutator(self, mutator):
        self.__dict__["mutator"] = mutator

    def forward(self, *inputs):
        raise NotImplementedError("Mutable forward must be implemented")

    def __repr__(self):
        return "{} ({})".format(self.name, self.key)

    def similar(self, other):
        return self == other


class LayerChoice(PyTorchMutable):
    def __init__(self, ops, key=None):
        super().__init__(key=key)
        self.length = len(ops)
        self.choices = nn.ModuleList(ops)

    def forward(self, *inputs):
        return self.mutator.on_forward(self, self.choices, *inputs)

    def similar(self, other):
        return type(self) == type(other) and self.length == other.length


class InputChoice(PyTorchMutable):
    def __init__(self, n_candidates, n_selected=None, reduction="mean", return_index=False, key=None):
        super().__init__(key=key)
        self.n_candidates = n_candidates
        self.n_selected = n_selected
        self.reduction = reduction
        self.return_index = return_index

    def forward(self, *inputs):
        assert len(inputs) == self.n_candidates, "Length of the input list must be equal to the number of candidates."
        return self.mutator.on_forward(self, *inputs)

    def similar(self, other):
        return type(self) == type(other) and \
            self.n_candidates == other.n_candidates and self.n_selected == other.n_selected
import logging
from torch import nn as nn
from nni.nas.pytorch.mutables import PyTorchMutable
from nni.nas.utils import to_snake_case
logger = logging.getLogger(__name__)


class PyTorchMutator(nn.Module):
    def __init__(self, model):
        super().__init__()
        self.before_build(model)
        self.parse_search_space(model)
        self.after_build(model)

    def before_build(self, model):
        pass

    def after_build(self, model):
        pass

    def named_mutables(self, model):
        # Yield (name, module, distinct) for every mutable in the model; `distinct` is True only for
        # the first mutable encountered with a given key, so callers can skip key-sharing duplicates.
        key2module = dict()
        for name, module in model.named_modules():
            if isinstance(module, PyTorchMutable):
                distinct = False
                if module.key in key2module:
                    assert key2module[module.key].similar(module), "Mutables that share the same key must be similar " \
                                                                   "to each other"
                else:
                    distinct = True
                    key2module[module.key] = module
                yield name, module, distinct

    def __setattr__(self, key, value):
        if key in ["model", "net", "network"]:
            logger.warning("Think twice before including the network in the mutator.")
        return super().__setattr__(key, value)

    def parse_search_space(self, model):
        for name, mutable, distinct in self.named_mutables(model):
            mutable.name = name
            mutable.set_mutator(self)
            if not distinct:
                continue
            init_method_name = "on_init_{}".format(to_snake_case(mutable.__class__.__name__))
            if hasattr(self, init_method_name) and callable(getattr(self, init_method_name)):
                getattr(self, init_method_name)(mutable)
            else:
                # fallback to general init
                self.on_init_general(mutable)

    def on_init_general(self, mutable):
        pass

    def on_forward_general(self, mutable, *inputs):
        raise NotImplementedError("Forward has to be implemented")

    def on_forward(self, mutable, *inputs):
        """Callback on forwarding a mutable"""
        forward_method_name = "on_forward_{}".format(to_snake_case(mutable.__class__.__name__))
        if hasattr(self, forward_method_name) and callable(getattr(self, forward_method_name)):
            return getattr(self, forward_method_name)(mutable, *inputs)
        else:
            # fallback to general forward
            return self.on_forward_general(mutable, *inputs)

    def forward(self, *inputs):
        raise NotImplementedError("Mutator is not forward-able")
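

# Illustrative sketch (hypothetical subclass): a minimal mutator that always picks the first
# candidate of every LayerChoice. parse_search_space / on_forward dispatch to the on_init_* /
# on_forward_* hooks via to_snake_case of the mutable's class name.
class FirstChoiceMutator(PyTorchMutator):
    def on_forward_layer_choice(self, mutable, ops, *inputs):
        return ops[0](*inputs)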
from abc import ABC, abstractmethod


class Trainer(ABC):
    @abstractmethod
    def train(self):
        raise NotImplementedError

    @abstractmethod
    def finalize(self):
        raise NotImplementedError
import re
from collections import OrderedDict
import torch

_counter = 0


def global_mutable_counting():
    global _counter
    _counter += 1
    return _counter


def to_snake_case(camel_case):
    return re.sub('(?!^)([A-Z]+)', r'_\1', camel_case).lower()
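

# Illustrative examples: to_snake_case maps a mutable's class name to the hook suffix used by
# PyTorchMutator, e.g. "LayerChoice" -> "layer_choice" gives on_init_layer_choice / on_forward_layer_choice.
def _to_snake_case_examples():
    assert to_snake_case("LayerChoice") == "layer_choice"
    assert to_snake_case("InputChoice") == "input_choice"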


def auto_device():
    return torch.device("cuda" if torch.cuda.is_available() else "cpu")


class AverageMeterGroup(object):
    def __init__(self):
        self.meters = OrderedDict()

    def update(self, data):
        for k, v in data.items():
            if k not in self.meters:
                self.meters[k] = AverageMeter(k, ":4f")
            self.meters[k].update(v)

    def __str__(self):
        return " ".join(str(v) for _, v in self.meters.items())


class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return fmtstr.format(**self.__dict__)
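

# Illustrative sketch: DartsTrainer feeds metric dicts into an AverageMeterGroup each step and
# prints the current values and running averages; the numbers below are made up.
def _meters_example():
    meters = AverageMeterGroup()
    meters.update({"acc1": 0.5, "loss": 2.3})
    meters.update({"acc1": 0.7, "loss": 1.9})
    return str(meters)   # e.g. "acc1 0.700000 (0.600000) loss 1.900000 (2.100000)"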