Unverified commit 3ec26b40, authored by liuzhe-lz and committed by GitHub

Merge master into dev-retiarii (#3178)

parent d165905d
import os
import shutil
from pathlib import Path

# Move every file under archive_en_US/ to the matching path under en_US/,
# then delete the .rst file with the same stem at the destination.
for root, dirs, files in os.walk('archive_en_US'):
    root = Path(root)
    for file in files:
        moved_root = Path('en_US') / root.relative_to('archive_en_US')
        shutil.move(root / file, moved_root / file)
        os.remove(moved_root / (Path(file).stem + '.rst'))
@@ -5,7 +5,7 @@
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
 from torchvision import datasets, transforms
-from nni.compression.torch import BNNQuantizer
+from nni.algorithms.compression.pytorch.quantization import BNNQuantizer
 class VGG_Cifar10(nn.Module):
...
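The hunk above is representative of this whole merge: compression classes move out of nni.compression.torch, with quantizers now under nni.algorithms.compression.pytorch.quantization. A minimal, hedged sketch of calling code under the new layout follows; the stand-in model and config values are illustrative assumptions, and some NNI releases also expect an optimizer argument when constructing the quantizer.

import torch.nn as nn
from nni.algorithms.compression.pytorch.quantization import BNNQuantizer

# stand-in model; the real example script uses VGG_Cifar10
model = nn.Sequential(nn.Conv2d(3, 16, 3), nn.BatchNorm2d(16), nn.Hardtanh(), nn.Conv2d(16, 32, 3))

# illustrative config: binarize the weights of every Conv2d layer
config_list = [{'quant_types': ['weight'], 'quant_bits': 1, 'op_types': ['Conv2d']}]
quantizer = BNNQuantizer(model, config_list)  # some versions also require an optimizer here
quantizer.compress()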
 import torch
 import torch.nn.functional as F
 from torchvision import datasets, transforms
-from nni.compression.torch import DoReFaQuantizer
+from nni.algorithms.compression.pytorch.quantization import DoReFaQuantizer
 class Mnist(torch.nn.Module):
@@ -86,4 +86,4 @@
 if __name__ == '__main__':
     main()
\ No newline at end of file
@@ -3,7 +3,7 @@
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
 from torchvision import datasets, transforms
-from nni.compression.torch import L1FilterPruner
+from nni.algorithms.compression.pytorch.pruning import L1FilterPruner
 from models.cifar10.vgg import VGG
...
 import torch
 import torch.nn.functional as F
 from torchvision import datasets, transforms
-from nni.compression.torch import QAT_Quantizer
+from nni.algorithms.compression.pytorch.quantization import QAT_Quantizer
 class Mnist(torch.nn.Module):
...
@@ -8,7 +8,7 @@
 import time
 import torch
 import torch.nn as nn
 from torchvision.models import resnet
-from nni.compression.torch import AMCPruner
+from nni.algorithms.compression.pytorch.pruning import AMCPruner
 from data import get_split_dataset
 from utils import AverageMeter, accuracy
...
@@ -15,9 +15,9 @@
 import torch.optim as optim
 from tensorboardX import SummaryWriter
 from torchvision.models import resnet
-from nni.compression.torch.pruning.amc.lib.net_measure import measure_model
-from nni.compression.torch.pruning.amc.lib.utils import get_output_folder
-from nni.compression.torch import ModelSpeedup
+from nni.algorithms.compression.pytorch.pruning.amc.lib.net_measure import measure_model
+from nni.algorithms.compression.pytorch.pruning.amc.lib.utils import get_output_folder
+from nni.compression.pytorch import ModelSpeedup
 from data import get_dataset
 from utils import AverageMeter, accuracy, progress_bar
...
@@ -14,10 +14,10 @@
 from torchvision import datasets, transforms
 from models.mnist.lenet import LeNet
 from models.cifar10.vgg import VGG
 from models.cifar10.resnet import ResNet18, ResNet50
-from nni.compression.torch import L1FilterPruner, L2FilterPruner, FPGMPruner
-from nni.compression.torch import SimulatedAnnealingPruner, ADMMPruner, NetAdaptPruner, AutoCompressPruner
-from nni.compression.torch import ModelSpeedup
-from nni.compression.torch.utils.counter import count_flops_params
+from nni.algorithms.compression.pytorch.pruning import L1FilterPruner, L2FilterPruner, FPGMPruner
+from nni.algorithms.compression.pytorch.pruning import SimulatedAnnealingPruner, ADMMPruner, NetAdaptPruner, AutoCompressPruner
+from nni.compression.pytorch import ModelSpeedup
+from nni.compression.pytorch.utils.counter import count_flops_params
 def get_data(dataset, data_dir, batch_size, test_batch_size):
...
@@ -6,7 +6,7 @@
 import torch.nn.functional as F
 import torch.utils.data
 import torchvision.datasets as datasets
 import torchvision.transforms as transforms
-from nni.compression.torch import LotteryTicketPruner
+from nni.algorithms.compression.pytorch.pruning import LotteryTicketPruner
 class fc1(nn.Module):
...
@@ -9,8 +9,16 @@
 from torchvision import datasets, transforms
 from models.cifar10.vgg import VGG
 import nni
-from nni.compression.torch import LevelPruner, SlimPruner, FPGMPruner, L1FilterPruner, \
-    L2FilterPruner, AGPPruner, ActivationMeanRankFilterPruner, ActivationAPoZRankFilterPruner
+from nni.algorithms.compression.pytorch.pruning import (
+    LevelPruner,
+    SlimPruner,
+    FPGMPruner,
+    L1FilterPruner,
+    L2FilterPruner,
+    AGPPruner,
+    ActivationMeanRankFilterPruner,
+    ActivationAPoZRankFilterPruner
+)
 prune_config = {
     'level': {
...
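Only the import path changes for the pruners; their interface is unchanged. Below is a hedged sketch of typical one-shot pruning with the new path; the tiny Sequential model, the sparsity value, and the output file names are illustrative assumptions, not part of this commit.

import torch.nn as nn
from nni.algorithms.compression.pytorch.pruning import L1FilterPruner

# stand-in model; the example scripts above use the VGG/LeNet/ResNet definitions
model = nn.Sequential(nn.Conv2d(3, 16, 3), nn.ReLU(), nn.Conv2d(16, 32, 3), nn.ReLU())

# illustrative config: prune half of the filters in every Conv2d layer
config_list = [{'sparsity': 0.5, 'op_types': ['Conv2d']}]
pruner = L1FilterPruner(model, config_list)
model = pruner.compress()                                  # wraps the layers and computes masks
pruner.export_model(model_path='pruned.pth', mask_path='mask.pth')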
@@ -6,7 +6,7 @@
 import torch.nn as nn
 import torch.nn.functional as F
 from torchvision import datasets, transforms
 from models.cifar10.vgg import VGG
-from nni.compression.torch import apply_compression_results, ModelSpeedup
+from nni.compression.pytorch import apply_compression_results, ModelSpeedup
 torch.manual_seed(0)
 use_mask = True
...
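apply_compression_results and ModelSpeedup now live in nni.compression.pytorch. A hedged sketch of the speedup flow follows; the stand-in model, the mask file name, and the dummy-input shape are placeholders and assume a mask file previously exported by a pruner.

import torch
import torch.nn as nn
from nni.compression.pytorch import apply_compression_results, ModelSpeedup

model = nn.Sequential(nn.Conv2d(3, 16, 3), nn.ReLU(), nn.Conv2d(16, 32, 3))  # stand-in model
mask_file = 'mask.pth'                   # placeholder: exported earlier by a pruner
dummy_input = torch.randn(1, 3, 32, 32)  # placeholder input shape

# replay the masks on a freshly built model (weights zeroed, shapes unchanged)
apply_compression_results(model, mask_file)

# or physically remove the pruned channels so inference actually gets faster
ModelSpeedup(model, dummy_input, mask_file).speedup_model()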
@@ -3,7 +3,7 @@
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
 from torchvision import datasets, transforms
-from nni.compression.torch import L1FilterPruner
+from nni.algorithms.compression.pytorch.pruning import L1FilterPruner
 from knowledge_distill.knowledge_distill import KnowledgeDistill
 from models.cifar10.vgg import VGG
...
@@ -5,7 +5,7 @@
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
 from torchvision import datasets, transforms
-from nni.compression.torch import SlimPruner
+from nni.algorithms.compression.pytorch.pruning import SlimPruner
 from models.cifar10.vgg import VGG
 def updateBN(model):
...
@@ -18,7 +18,7 @@
 import torch.optim as optim
 from torchvision import datasets, transforms
 from nni.nas.pytorch.mutables import LayerChoice, InputChoice
-from nni.nas.pytorch.classic_nas import get_and_apply_next_architecture
+from nni.algorithms.nas.pytorch.classic_nas import get_and_apply_next_architecture
 logger = logging.getLogger('mnist_AutoML')
...
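The classic-NAS helper moves under nni.algorithms.nas.pytorch.classic_nas while the mutables stay at nni.nas.pytorch.mutables. A hedged sketch of the trial-side pattern the example follows; the toy Net below is an assumption, and the call only succeeds when the script runs inside an NNI experiment.

import torch.nn as nn
from nni.nas.pytorch.mutables import LayerChoice
from nni.algorithms.nas.pytorch.classic_nas import get_and_apply_next_architecture

class Net(nn.Module):
    # toy search space, not the example's MNIST model
    def __init__(self):
        super().__init__()
        self.conv = LayerChoice([nn.Conv2d(1, 16, 3, padding=1), nn.Conv2d(1, 16, 5, padding=2)])

    def forward(self, x):
        return self.conv(x)

model = Net()
get_and_apply_next_architecture(model)  # fixes the LayerChoice/InputChoice decisions for this trial
# ... then train and evaluate `model` as a plain PyTorch module and report metrics via nni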
@@ -17,10 +17,13 @@ tuner:
   #choice: maximize, minimize
   optimize_mode: maximize
 trial:
-  command: python3 main_adl.py
-  codeDir: .
+  command: python3 /cifar10/main_adl.py
+  codeDir: /cifar10
   gpuNum: 1
   image: {replace_with_the_image_that_has_adaptdl_installed}
+  # optional
+  imagePullSecrets:
+    - name: {secret}
   adaptive: true
   checkpoint:
     storageClass: dfs
...
@@ -146,7 +146,7 @@ def valid(epoch):
         writer.add_scalar("Accuracy/Valid", stats["accuracy"], epoch)
         if adaptdl.env.replica_rank() == 0:
-            nni.report_intermediate_result(stats["accuracy"], accum=stats)
+            nni.report_intermediate_result(stats["accuracy"])
             print("Valid:", stats)
     return stats["accuracy"]
...
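The adl example now reports only the scalar accuracy; the accum= keyword is dropped. A hedged sketch of the reporting pattern, with a fake validate() standing in for the example's own valid():

import random
import nni

def validate(epoch):
    # stand-in for the example's valid(); returns a fake accuracy
    return 0.5 + 0.04 * epoch + 0.01 * random.random()

best = 0.0
for epoch in range(10):
    accuracy = validate(epoch)
    best = max(best, accuracy)
    nni.report_intermediate_result(accuracy)  # one value per epoch, matching the change above
nni.report_final_result(best)                 # reported once at the end of the trial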
@@ -22,5 +22,6 @@
 experiment.config.max_trial_number = 5
 experiment.config.search_space = search_space
 experiment.config.trial_command = 'python3 mnist.py'
 experiment.config.trial_code_directory = Path(__file__).parent
-experiment.run(8081, debug=True)
+experiment.config.training_service.use_active_gpu = True
+experiment.run(8081)
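The launch-from-Python example now sets use_active_gpu on the training service and runs without debug mode. Below is a hedged sketch of the surrounding script, reconstructed from the fields visible in this hunk; the Experiment construction, the search_space contents, and trial_concurrency are assumptions not shown in this diff.

from pathlib import Path
from nni.experiment import Experiment

search_space = {'lr': {'_type': 'loguniform', '_value': [1e-4, 1e-1]}}  # illustrative
experiment = Experiment('local')              # assumption: local training service
experiment.config.trial_concurrency = 1       # assumption
experiment.config.max_trial_number = 5
experiment.config.search_space = search_space
experiment.config.trial_command = 'python3 mnist.py'
experiment.config.trial_code_directory = Path(__file__).parent
experiment.config.training_service.use_active_gpu = True
experiment.run(8081)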
@@ -3,7 +3,6 @@
 from .finegrained_pruning import *
 from .structured_pruning import *
-from .apply_compression import apply_compression_results
 from .one_shot import *
 from .agp import *
 from .lottery_ticket import LotteryTicketPruner
...
@@ -278,7 +278,8 @@ class StructuredWeightMasker(WeightMasker):
                 sparsity, _w, _w_idx)
             num_total = current_weight.size(0)
             if num_total < 2 or num_prune < 1:
-                return base_mask
+                masks[name] = base_mask
+                continue
             _tmp_mask = self.get_mask(
                 base_mask, current_weight, num_prune, _w, _w_idx, channel_masks)
...
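Beyond the import renames, this hunk is a behavioral fix: when a layer cannot be pruned (fewer than two output channels, or a prune count of zero), the masker used to return early, so later layers never received masks; it now stores the untouched base mask for that layer and continues. The self-contained toy below illustrates the skip-and-continue pattern; it is a simplified stand-in, not the library's StructuredWeightMasker.

import torch

def mask_layers(layers, sparsity):
    # toy per-layer L1 filter masking with the fixed control flow
    masks = {}
    for name, weight in layers.items():
        num_total = weight.size(0)
        num_prune = int(num_total * sparsity)
        base_mask = torch.ones_like(weight)
        if num_total < 2 or num_prune < 1:
            masks[name] = base_mask   # keep this layer untouched...
            continue                  # ...but still process the remaining layers
        # zero the filters with the smallest L1 norm (toy version of get_mask)
        norms = weight.abs().sum(dim=tuple(range(1, weight.dim())))
        pruned = norms.topk(num_prune, largest=False).indices
        mask = torch.ones_like(weight)
        mask[pruned] = 0
        masks[name] = mask
    return masks

layers = {'tiny': torch.randn(1, 3, 3, 3), 'conv': torch.randn(8, 3, 3, 3)}
print({name: int(mask.sum().item()) for name, mask in mask_layers(layers, 0.5).items()})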
@@ -3,3 +3,4 @@
 from .speedup import ModelSpeedup
 from .compressor import Compressor, Pruner, Quantizer
+from .pruning import apply_compression_results