Unverified Commit 580ce0a3 authored by liuzhe-lz, committed by GitHub

Integration test (#3088)

parent e12f6113
......@@ -6,7 +6,7 @@ import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets, transforms
from models.cifar10.vgg import VGG
from nni.compression.torch import apply_compression_results, ModelSpeedup
from nni.compression.pytorch import apply_compression_results, ModelSpeedup
torch.manual_seed(0)
use_mask = True
......
......@@ -3,7 +3,7 @@ import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets, transforms
from nni.compression.torch import L1FilterPruner
from nni.algorithms.compression.pytorch.pruning import L1FilterPruner
from knowledge_distill.knowledge_distill import KnowledgeDistill
from models.cifar10.vgg import VGG
......
......@@ -5,7 +5,7 @@ import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets, transforms
from nni.compression.torch import SlimPruner
from nni.algorithms.compression.pytorch.pruning import SlimPruner
from models.cifar10.vgg import VGG
def updateBN(model):
......
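The hunks above move the speedup utilities to nni.compression.pytorch and the pruners to nni.algorithms.compression.pytorch.pruning. A minimal pruning sketch using the renamed imports (the toy model, sparsity value, and output file names are illustrative placeholders, not taken from the example scripts); the mask-application and speedup side is sketched after the apply_compression_results hunk further down:

import torch.nn as nn
from nni.algorithms.compression.pytorch.pruning import L1FilterPruner

# a toy stand-in for the VGG model used in the example scripts
model = nn.Sequential(nn.Conv2d(3, 16, 3), nn.ReLU(), nn.Conv2d(16, 32, 3))

# prune half of the filters in every Conv2d layer, then export weights and masks
pruner = L1FilterPruner(model, [{'sparsity': 0.5, 'op_types': ['Conv2d']}])
pruner.compress()
pruner.export_model(model_path='pruned_model.pth', mask_path='mask.pth')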
......@@ -18,7 +18,7 @@ import torch.optim as optim
from torchvision import datasets, transforms
from nni.nas.pytorch.mutables import LayerChoice, InputChoice
from nni.nas.pytorch.classic_nas import get_and_apply_next_architecture
from nni.algorithms.nas.pytorch.classic_nas import get_and_apply_next_architecture
logger = logging.getLogger('mnist_AutoML')
......
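Likewise for classic NAS, the entry point moves under nni.algorithms.nas.pytorch.classic_nas. A minimal sketch of the renamed import in use (the two-branch toy network is a placeholder; only the import paths and the call come from the hunk, and the call only takes effect when the script runs inside an NNI experiment):

import torch.nn as nn
from nni.nas.pytorch.mutables import LayerChoice
from nni.algorithms.nas.pytorch.classic_nas import get_and_apply_next_architecture

class Net(nn.Module):
    def __init__(self):
        super().__init__()
        # the tuner selects one of these candidate operations for each trial
        self.conv = LayerChoice([nn.Conv2d(3, 16, 3, padding=1),
                                 nn.Conv2d(3, 16, 5, padding=2)])

    def forward(self, x):
        return self.conv(x)

model = Net()
get_and_apply_next_architecture(model)  # fix the architecture chosen for this trial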
......@@ -3,7 +3,6 @@
from .finegrained_pruning import *
from .structured_pruning import *
from .apply_compression import apply_compression_results
from .one_shot import *
from .agp import *
from .lottery_ticket import LotteryTicketPruner
......
......@@ -3,3 +3,4 @@
from .speedup import ModelSpeedup
from .compressor import Compressor, Pruner, Quantizer
from .pruning import apply_compression_results
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from .apply_compression import apply_compression_results
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import logging
import torch
logger = logging.getLogger('torch apply compression')
def apply_compression_results(model, masks_file, map_location=None):
    """
    Apply the masks from ```masks_file``` to the model.
    Note: this API is intended for inference only; it simply multiplies the weights
    by the corresponding masks when it is called.

    Parameters
    ----------
    model : torch.nn.Module
        The model to be compressed
    masks_file : str
        The path of the mask file
    map_location : str
        The device on which the masks are placed, same as map_location in ```torch.load```
    """
    masks = torch.load(masks_file, map_location)
    for name, module in model.named_modules():
        if name in masks:
            # multiply weight (and bias, if present in the mask) in place by the saved mask
            module.weight.data = module.weight.data.mul_(masks[name]['weight'])
            if hasattr(module, 'bias') and module.bias is not None and 'bias' in masks[name]:
                module.bias.data = module.bias.data.mul_(masks[name]['bias'])
\ No newline at end of file
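A usage sketch for the function above, in line with the inference-only note in its docstring (the toy model, checkpoint and mask file names, and input shape are placeholders):

import torch
import torch.nn as nn
from nni.compression.pytorch import ModelSpeedup, apply_compression_results

# rebuild the architecture and load the weights exported by the pruner
model = nn.Sequential(nn.Conv2d(3, 16, 3), nn.ReLU(), nn.Conv2d(16, 32, 3))
model.load_state_dict(torch.load('pruned_model.pth'))
apply_compression_results(model, 'mask.pth', map_location='cpu')

# optionally replace the masked-out filters with genuinely smaller layers
dummy_input = torch.randn(1, 3, 32, 32)
ModelSpeedup(model, dummy_input, 'mask.pth').speedup_model()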
trigger: none
pr: none
schedules:
- cron: 0 16 * * *
branches:
include: [ master ]
jobs:
- job: linux
pool: NNI CI GPU3
timeoutInMinutes: 120
steps:
- script: |
echo "##vso[task.setvariable variable=PATH]${PATH}:${HOME}/.local/bin"
echo "##vso[task.setvariable variable=NNI_RELEASE]999.$(date -u +%Y%m%d%H%M%S)"
python3 -m pip install -U --upgrade pip setuptools
python3 -m pip install -U pytest
displayName: Prepare
- script: |
set -e
python3 setup.py build_ts
python3 setup.py bdist_wheel -p manylinux1_x86_64
python3 -m pip install dist/nni-${NNI_RELEASE}-py3-none-manylinux1_x86_64.whl
displayName: Install NNI
- script: |
set -e
python3 -m pip install -U scikit-learn==0.23.2
python3 -m pip install -U torchvision==0.4.2
python3 -m pip install -U torch==1.3.1
python3 -m pip install -U keras==2.1.6
python3 -m pip install -U tensorflow==2.3.1 tensorflow-estimator==2.3.0
python3 -m pip install -U thop
sudo apt-get install swig -y
nnictl package install --name=SMAC
nnictl package install --name=BOHB
nnictl package install --name=PPOTuner
displayName: Install extra dependencies
- script: |
set -e
cd examples/tuners/customized_tuner
python3 setup.py develop --user
nnictl package install .
displayName: Install customized tuner
- script: |
set -e
(cd test && python3 -m pytest ut)
export PATH=$PATH:$PWD/toolchain/yarn/bin
export CI=true
(cd ts/nni_manager && yarn test)
(cd ts/nasui && yarn test)
displayName: Unit test
continueOnError: true
- script: |
cd test
python3 nni_test/nnitest/run_tests.py --config config/integration_tests.yml --ts local
displayName: Integration test
continueOnError: true
- script: |
cd test
source scripts/nas.sh
displayName: NAS test
continueOnError: true
- script: |
cd test
source scripts/model_compression.sh
displayName: Model compression test
trigger: none
pr: none
schedules:
- cron: 0 16 * * *
branches:
include: [ master ]
jobs:
- job: local_windows
pool: NNI CI WINDOWS2
timeoutInMinutes: 120
steps:
- script: |
python -m pip install -U --upgrade pip setuptools
python -m pip install -U pytest
displayName: Install Python tools
- script: |
python -m pip uninstall nni --yes
set NNI_RELEASE=999.0
python setup.py build_ts
python setup.py bdist_wheel -p win_amd64
python -m pip install dist/nni-999.0-py3-none-win_amd64.whl
displayName: Install NNI
- script: |
python -m pip install -U scikit-learn==0.23.2
python -m pip install -U keras==2.1.6
python -m pip install -U torchvision===0.4.1 torch===1.3.1 -f https://download.pytorch.org/whl/torch_stable.html
python -m pip install -U tensorflow==2.3.1 tensorflow-estimator==2.3.0
nnictl package install --name=PPOTuner
displayName: Install extra dependencies
- script: |
cd examples/tuners/customized_tuner
python setup.py develop --user
nnictl package install .
displayName: Install example customized tuner
- script: |
cd test
python -m pytest ut
echo "TODO: TypeScript UT"
displayName: Unit test
continueOnError: true
- script: |
cd test
python nni_test/nnitest/run_tests.py --config config/integration_tests.yml --ts local
displayName: Integration test
trigger: none
pr: none
schedules:
- cron: 0 16 * * *
branches:
include: [ master ]
# variables set on VSO: (mostly for security concerns)
# pai_user
# pai_token
# manager_ip
# docker_hub_password
jobs:
- job: pai
pool: NNI CI PAI CLI
timeoutInMinutes: 120
steps:
- script: |
export NNI_RELEASE=999.$(date -u +%Y%m%d%H%M%S)
echo "##vso[task.setvariable variable=PATH]${PATH}:${HOME}/.local/bin"
echo "##vso[task.setvariable variable=NNI_RELEASE]${NNI_RELEASE}"
echo "Working directory: ${PWD}"
echo "NNI version: ${NNI_RELEASE}"
echo "Build docker image: $(build_docker_image)"
python3 -m pip install -U --upgrade pip setuptools
displayName: Prepare
- script: |
set -e
python3 setup.py build_ts
python3 setup.py bdist_wheel -p manylinux1_x86_64
python3 -m pip install -U dist/nni-${NNI_RELEASE}-py3-none-manylinux1_x86_64.whl
displayName: Build and install NNI
- script: |
set -e
sudo apt-get install swig -y
nnictl package install --name=SMAC
nnictl package install --name=BOHB
displayName: Install extra tuners
- script: |
set -e
cd examples/tuners/customized_tuner
python3 setup.py develop --user
nnictl package install .
displayName: Install customized tuner
- script: |
set -e
docker login -u nnidev -p $(docker_hub_password)
echo '## Build docker image ##'
docker build --build-arg NNI_RELEASE=${NNI_RELEASE} -t nnidev/nni-it-pai:latest .
echo '## Upload docker image ##'
docker push nnidev/nni-it-pai:latest
condition: eq(variables['build_docker_image'], 'true')
displayName: Build and upload docker image
- script: |
set -e
cd test
python3 nni_test/nnitest/generate_ts_config.py \
--ts pai \
--pai_reuse false \
--pai_host https://ne.openpai.org \
--pai_user $(pai_user) \
--nni_docker_image nnidev/nni-it-pai:latest \
--pai_storage_config_name confignfs-data \
--pai_token $(pai_token) \
--nni_manager_nfs_mount_path /home/quzha/mnt-pai-ne/shinyang3 \
--container_nfs_mount_path /mnt/confignfs-data/shinyang3 \
--nni_manager_ip $(manager_ip) \
--vc nni
python3 nni_test/nnitest/run_tests.py --config config/integration_tests.yml --ts pai
displayName: Integration test
- script: |
set -e
cd test
python3 nni_test/nnitest/generate_ts_config.py \
--ts pai \
--pai_reuse true \
--pai_host https://ne.openpai.org \
--pai_user $(pai_user) \
--nni_docker_image nnidev/nni-it-pai:latest \
--pai_storage_config_name confignfs-data \
--pai_token $(pai_token) \
--nni_manager_nfs_mount_path /home/quzha/mnt-pai-ne/shinyang3 \
--container_nfs_mount_path /mnt/confignfs-data/shinyang3 \
--nni_manager_ip $(manager_ip) \
--vc nni
python3 nni_test/nnitest/run_tests.py --config config/integration_tests.yml --ts pai
displayName: Integration test (reuse mode)
trigger: none
pr: none
schedules:
- cron: 0 16 * * *
branches:
include: [ master ]
variables:
worker: remote_nni-ci-gpu-03
# variables set on VSO: (for security concerns)
# manager_ip
# worker_ip
# password_in_docker
jobs:
- job: remote_linux2linux
pool: NNI CI REMOTE CLI
timeoutInMinutes: 120
steps:
- script: |
export NNI_RELEASE=999.$(date -u +%Y%m%d%H%M%S)
echo "##vso[task.setvariable variable=PATH]${PATH}:${HOME}/.local/bin"
echo "##vso[task.setvariable variable=NNI_RELEASE]${NNI_RELEASE}"
echo "Working directory: ${PWD}"
echo "NNI version: ${NNI_RELEASE}"
python3 -m pip install -U --upgrade pip setuptools
displayName: Prepare
- script: |
set -e
python3 setup.py build_ts
python3 setup.py bdist_wheel -p manylinux1_x86_64
python3 -m pip install dist/nni-${NNI_RELEASE}-py3-none-manylinux1_x86_64.whl
displayName: Install NNI
- script: |
set -e
sudo apt-get install swig -y
nnictl package install --name=SMAC
nnictl package install --name=BOHB
displayName: Install extra tuners
- script: |
set -e
cd examples/tuners/customized_tuner
python3 setup.py develop --user
nnictl package install .
displayName: Install customized tuner
- task: CopyFilesOverSSH@0
inputs:
sshEndpoint: $(worker)
sourceFolder: dist
targetFolder: /tmp/nnitest/$(Build.BuildId)/dist
overwrite: true
displayName: Copy wheel to remote machine
timeoutInMinutes: 10
- task: CopyFilesOverSSH@0
inputs:
sshEndpoint: $(worker)
sourceFolder: test
targetFolder: /tmp/nnitest/$(Build.BuildId)/test
overwrite: true
displayName: Copy test scripts to remote machine
timeoutInMinutes: 10
- task: SSH@0
inputs:
sshEndpoint: $(worker)
runOptions: commands
commands: |
python3 /tmp/nnitest/$(Build.BuildId)/test/nni_test/nnitest/remote_docker.py --mode start --name $(Build.BuildId) --image nni/nni
echo "##vso[task.setvariable variable=docker_port]$(cat /tmp/nnitest/$(Build.BuildId)/port)"
displayName: Start docker
- script: |
cd test
python3 nni_test/nnitest/generate_ts_config.py \
--ts remote \
--remote_reuse false \
--remote_user nni \
--remote_host $(worker_ip) \
--remote_port $(docker_port) \
--remote_pwd $(password_in_docker) \
--nni_manager_ip $(manager_ip)
python3 nni_test/nnitest/run_tests.py --config config/integration_tests.yml --ts remote
displayName: Integration test
- script: |
cd test
python3 nni_test/nnitest/generate_ts_config.py \
--ts remote \
--remote_reuse true \
--remote_user nni \
--remote_host $(worker_ip) \
--remote_port $(docker_port) \
--remote_pwd $(password_in_docker) \
--nni_manager_ip $(manager_ip)
python3 nni_test/nnitest/run_tests.py --config config/integration_tests.yml --ts remote
displayName: Integration test (reuse mode)
- task: SSH@0
inputs:
sshEndpoint: $(worker)
runOptions: commands
commands: python3 /tmp/nnitest/$(Build.BuildId)/test/nni_test/nnitest/remote_docker.py --mode stop --name $(Build.BuildId)
displayName: Stop docker
......@@ -106,7 +106,7 @@ def _setup():
packages = _find_python_packages(),
package_data = {
'nni': ['**/requirements.txt'],
'nni': _find_requirements_txt(), # must do this manually due to setuptools issue #1806
'nni_node': _find_node_files() # note: this does not work before building
},
......@@ -136,15 +136,22 @@ def _find_python_packages():
packages.append(dirpath.replace('/', '.'))
return sorted(packages) + ['nni_node']
def _find_requirements_txt():
requirement_files = []
for dirpath, dirnames, filenames in os.walk('nni'):
if 'requirements.txt' in filenames:
requirement_files.append(os.path.join(dirpath[len('nni/'):], 'requirements.txt'))
return requirement_files
def _find_node_files():
if not os.path.exists('nni_node'):
if release and 'built_ts' not in sys.argv:
sys.exit('ERROR: To build a release version, run "python setup.py built_ts" first')
if release and 'build_ts' not in sys.argv:
sys.exit('ERROR: To build a release version, run "python setup.py build_ts" first')
return []
files = []
for dirpath, dirnames, filenames in os.walk('nni_node'):
for filename in filenames:
files.append((dirpath + '/' + filename)[len('nni_node/'):])
files.append(os.path.join(dirpath[len('nni_node/'):], filename))
if '__init__.py' in files:
files.remove('__init__.py')
return sorted(files)
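For a concrete picture of what the two helpers above feed into package_data (the file paths below are hypothetical examples; only 'main.js' and the package names come from this diff), the mapping ends up roughly like this, since setuptools does not expand the '**' glob per the issue referenced in the hunk:

package_data = {
    'nni': [
        # one entry per requirements.txt found by _find_requirements_txt(),
        # relative to the nni package root (hypothetical examples)
        'algorithms/hpo/bohb_advisor/requirements.txt',
        'tools/gpu_tool/requirements.txt',
    ],
    'nni_node': [
        # every built file under nni_node, relative to that package (examples)
        'main.js',
        'common/utils.js',
    ],
}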
......@@ -169,9 +176,10 @@ class BuildTs(Command):
class Build(build):
def run(self):
assert release, 'Please set environment variable "NNI_RELEASE=<release_version>"'
assert os.path.isfile('nni_node/main.js'), 'Please run "build_ts" before "build"'
assert not os.path.islink('nni_node/main.js'), 'This is a development build'
if not release:
sys.exit('Please set environment variable "NNI_RELEASE=<release_version>"')
if os.path.islink('nni_node/main.js'):
sys.exit('A development build already exists. Please uninstall NNI and run "python3 setup.py clean --all".')
super().run()
class Develop(develop):
......@@ -228,4 +236,5 @@ _temp_files = [
]
_setup()
if __name__ == '__main__':
_setup()
authorName: nni
experimentName: default_test
maxExecDuration: 5m
maxTrialNum: 4
trialConcurrency: 2
searchSpacePath: ./mnist_search_space.json
tuner:
builtinTunerName: Random
assessor:
builtinAssessorName: Medianstop
classArgs:
optimize_mode: maximize
trial:
codeDir: ../../../examples/trials/mnist-tfv2
command: python3 mnist.py
useAnnotation: false
multiPhase: false
multiThread: false
trainingServicePlatform: local
......@@ -37,17 +37,12 @@ testCases:
- name: sklearn-regression
configFile: test/config/examples/sklearn-regression.yml
- name: mnist-tfv1
configFile: test/config/examples/mnist-tfv1.yml
- name: mnist-tensorflow
configFile: test/config/examples/mnist-tfv2.yml
config:
maxTrialNum: 1
trialConcurrency: 1
- name: mnist-keras
configFile: test/config/examples/mnist-keras.yml
config:
maxTrialNum: 2
trialConcurrency: 1
trainingService: local remote # FIXME: timeout on pai, looks like tensorflow failed to link CUDA
- name: mnist-pytorch-local
configFile: test/config/examples/mnist-pytorch.yml
......@@ -61,11 +56,12 @@ testCases:
launchCommand: nnictl create --config $configFile --debug
trainingService: remote pai kubeflow frameworkcontroller dlts
- name: mnist-annotation
configFile: test/config/examples/mnist-annotation.yml
config:
maxTrialNum: 1
trialConcurrency: 1
# TODO: move this and following commented test cases to pytorch or tf2
#- name: mnist-annotation
# configFile: test/config/examples/mnist-annotation.yml
# config:
# maxTrialNum: 1
# trialConcurrency: 1
- name: cifar10-pytorch
configFile: test/config/examples/cifar10-pytorch.yml
......@@ -79,8 +75,8 @@ testCases:
command: python3 main.py --epochs 1 --batches 1
gpuNum: 0
- name: nested-ss
configFile: test/config/examples/mnist-nested-search-space.yml
#- name: nested-ss
# configFile: test/config/examples/mnist-nested-search-space.yml
- name: classic-nas-gen-ss
configFile: test/config/examples/classic-nas-pytorch.yml
......@@ -215,40 +211,40 @@ testCases:
#########################################################################
# nni tuners test
#########################################################################
- name: tuner-annel
configFile: test/config/tuners/anneal.yml
#- name: tuner-annel
# configFile: test/config/tuners/anneal.yml
- name: tuner-evolution
configFile: test/config/tuners/evolution.yml
#- name: tuner-evolution
# configFile: test/config/tuners/evolution.yml
- name: tuner-random
configFile: test/config/tuners/random.yml
#- name: tuner-random
# configFile: test/config/tuners/random.yml
- name: tuner-smac
configFile: test/config/tuners/smac.yml
platform: linux darwin
#- name: tuner-smac
# configFile: test/config/tuners/smac.yml
# platform: linux darwin
- name: tuner-tpe
configFile: test/config/tuners/tpe.yml
#- name: tuner-tpe
# configFile: test/config/tuners/tpe.yml
- name: tuner-batch
configFile: test/config/tuners/batch.yml
#- name: tuner-batch
# configFile: test/config/tuners/batch.yml
- name: tuner-bohb
configFile: test/config/tuners/bohb.yml
platform: linux darwin
#- name: tuner-bohb
# configFile: test/config/tuners/bohb.yml
# platform: linux darwin
- name: tuner-gp
configFile: test/config/tuners/gp.yml
#- name: tuner-gp
# configFile: test/config/tuners/gp.yml
- name: tuner-grid
configFile: test/config/tuners/gridsearch.yml
#- name: tuner-grid
# configFile: test/config/tuners/gridsearch.yml
- name: tuner-hyperband
configFile: test/config/tuners/hyperband.yml
#- name: tuner-hyperband
# configFile: test/config/tuners/hyperband.yml
- name: tuner-metis
configFile: test/config/tuners/metis.yml
#- name: tuner-metis
# configFile: test/config/tuners/metis.yml
- name: tuner-regularized_evolution
configFile: test/config/tuners/regularized_evolution_tuner.yml
......
......@@ -13,8 +13,8 @@ from torchvision.models.resnet import resnet18
import unittest
from unittest import TestCase, main
from nni.compression.pytorch import ModelSpeedup
from nni.algorithms.compression.pytorch.pruning import L1FilterPruner, apply_compression_results
from nni.compression.pytorch import ModelSpeedup, apply_compression_results
from nni.algorithms.compression.pytorch.pruning import L1FilterPruner
from nni.algorithms.compression.pytorch.pruning.weight_masker import WeightMasker
from nni.algorithms.compression.pytorch.pruning.one_shot import _StructuredFilterPruner
......
......@@ -6,6 +6,7 @@ from subprocess import Popen, PIPE, STDOUT
import sys
from unittest import TestCase, main, skipIf
sys.path.append(str(Path(__file__).parent))
from mock.restful_server import init_response
from nni.tools.nnictl.command_utils import kill_command
......
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import sys
sys.path.append(os.path.dirname(__file__))
from mock.restful_server import init_response
from mock.experiment import create_mock_experiment, stop_mock_experiment, generate_args_parser, \
generate_args
from mock.experiment import create_mock_experiment, stop_mock_experiment, generate_args_parser, generate_args
from nni.tools.nnictl.nnictl_utils import get_experiment_time, get_experiment_status, \
check_experiment_id, parse_ids, get_config_filename, get_experiment_port, check_rest, \
trial_ls, list_experiment
......