Unverified commit b8d83b0d, authored by Hang Zhang and committed by GitHub

transforms (#272)

parent f70fa97e
@@ -93,7 +93,7 @@ py::array_t<float> apply_transform(int H, int W, int C, py::array_t<float> img,
   auto ctm_buf = ctm.request();
   // printf("H: %d, W: %d, C: %d\n", H, W, C);
-  py::array_t<float> result{img_buf.size};
+  py::array_t<float> result{(unsigned long)img_buf.size};
   auto res_buf = result.request();
   float *img_ptr = (float *)img_buf.ptr;
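For context on the one-line C++ change: `img_buf.size` is a signed `py::ssize_t`, and brace-initializing `py::array_t<float>` with it can hit a narrowing-conversion or overload-ambiguity error on some compilers, which the `(unsigned long)` cast presumably sidesteps. Functionally the line just allocates a flat float output buffer the size of the input image; a minimal numpy sketch of the equivalent, with hypothetical H, W, C values:

import numpy as np

# Python-side equivalent of `py::array_t<float> result{img_buf.size}`:
# a flat float32 buffer with one element per input value.
H, W, C = 224, 224, 3  # hypothetical image dimensions
img = np.random.rand(H, W, C).astype(np.float32)
result = np.empty(img.size, dtype=np.float32)  # img.size == H * W * C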
@@ -36,7 +36,7 @@ def get_transform(dataset, base_size=None, crop_size=224, rand_aug=False, etrans
             CenterCrop(crop_size),
         ])
         train_transforms.extend([
-           RandomHorizontalFlip(),
+            RandomHorizontalFlip(),
             ColorJitter(0.4, 0.4, 0.4),
             ToTensor(),
             Lighting(0.1, _imagenet_pca['eigval'], _imagenet_pca['eigvec']),
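`Lighting` in the hunk above is the AlexNet-style PCA color jitter applied to ImageNet training images after `ToTensor()`. A minimal sketch of such a transform, assuming the `Lighting(alphastd, eigval, eigvec)` signature seen above and a tensor input; the repo's actual implementation may differ in details:

import torch

class Lighting(object):
    """AlexNet-style PCA lighting noise (sketch only)."""
    def __init__(self, alphastd, eigval, eigvec):
        self.alphastd = alphastd  # strength of the random perturbation
        self.eigval = eigval      # (3,) eigenvalues of the RGB covariance
        self.eigvec = eigvec      # (3, 3) eigenvectors, rows = RGB channels

    def __call__(self, img):
        # img: float tensor of shape (3, H, W), i.e. applied after ToTensor()
        if self.alphastd == 0:
            return img
        alpha = img.new_empty(3).normal_(0, self.alphastd)
        # Shift each channel along the PCA axes: eigvec @ (alpha * eigval)
        rgb = (self.eigvec.type_as(img) * alpha.view(1, 3)
               * self.eigval.type_as(img).view(1, 3)).sum(1)
        return img + rgb.view(3, 1, 1)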
@@ -65,16 +65,16 @@ def get_transform(dataset, base_size=None, crop_size=224, rand_aug=False, etrans
             normalize,
         ])
     elif dataset == 'cifar10':
-        transform_train = transforms.Compose([
-            transforms.RandomCrop(32, padding=4),
-            transforms.RandomHorizontalFlip(),
-            transforms.ToTensor(),
-            transforms.Normalize((0.4914, 0.4822, 0.4465),
-                                 (0.2023, 0.1994, 0.2010)),
+        transform_train = Compose([
+            RandomCrop(32, padding=4),
+            RandomHorizontalFlip(),
+            ToTensor(),
+            Normalize((0.4914, 0.4822, 0.4465),
+                      (0.2023, 0.1994, 0.2010)),
         ])
-        transform_val = transforms.Compose([
-            transforms.ToTensor(),
-            transforms.Normalize((0.4914, 0.4822, 0.4465),
+        transform_val = Compose([
+            ToTensor(),
+            Normalize((0.4914, 0.4822, 0.4465),
                                  (0.2023, 0.1994, 0.2010)),
         ])
         return transform_train, transform_val
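The cifar10 branch now builds its pipelines from the package's own re-exported transform names rather than the `transforms.`-prefixed ones. A short usage sketch of the resulting train pipeline; here the bare names are assumed to behave like their torchvision.transforms counterparts:

import numpy as np
from PIL import Image
from torchvision.transforms import (Compose, RandomCrop, RandomHorizontalFlip,
                                    ToTensor, Normalize)

transform_train = Compose([
    RandomCrop(32, padding=4),           # pad, then crop back to 32x32
    RandomHorizontalFlip(),
    ToTensor(),
    Normalize((0.4914, 0.4822, 0.4465),  # CIFAR-10 per-channel means
              (0.2023, 0.1994, 0.2010)),  # and standard deviations
])

# Stand-in for a CIFAR-10 image (32x32 RGB)
img = Image.fromarray((np.random.rand(32, 32, 3) * 255).astype(np.uint8))
x = transform_train(img)
print(x.shape)  # torch.Size([3, 32, 32])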
@@ -29,9 +29,11 @@ class LR_Scheduler(object):
         iters_per_epoch: number of iterations per epoch
     """
     def __init__(self, mode, base_lr, num_epochs, iters_per_epoch=0,
-                 lr_step=0, warmup_epochs=0):
+                 lr_step=0, warmup_epochs=0, quiet=False):
         self.mode = mode
-        print('Using {} LR scheduler with warm-up epochs of {}!'.format(self.mode, warmup_epochs))
+        self.quiet = quiet
+        if not quiet:
+            print('Using {} LR scheduler with warm-up epochs of {}!'.format(self.mode, warmup_epochs))
         if mode == 'step':
             assert lr_step
         self.base_lr = base_lr
@@ -57,8 +59,9 @@ class LR_Scheduler(object):
         else:
             raise NotImplemented
         if epoch > self.epoch and (epoch == 0 or best_pred > 0.0):
-            print('\n=>Epoch %i, learning rate = %.4f, \
-                previous best = %.4f' % (epoch, lr, best_pred))
+            if not self.quiet:
+                print('\n=>Epoch %i, learning rate = %.4f, \
+                    previous best = %.4f' % (epoch, lr, best_pred))
             self.epoch = epoch
         assert lr >= 0
         self._adjust_learning_rate(optimizer, lr)
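Net effect of the two scheduler hunks: `quiet=True` now suppresses both the construction banner and the per-epoch learning-rate message. A minimal usage sketch, assuming the scheduler is invoked once per iteration as `scheduler(optimizer, i, epoch, best_pred)`; that `__call__` signature is taken from the surrounding class, not shown in this diff:

import torch

model = torch.nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

# quiet=True silences the 'Using ... LR scheduler ...' banner and the
# '=>Epoch ...' print added above. 'step' mode with lr_step=30 matches
# the assert shown in the first scheduler hunk.
scheduler = LR_Scheduler('step', base_lr=0.1, num_epochs=90,
                         iters_per_epoch=100, lr_step=30, quiet=True)

best_pred = 0.0
for epoch in range(90):
    for i in range(100):
        scheduler(optimizer, i, epoch, best_pred)  # adjusts lr, prints nothing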