Unverified commit eb65bc32, authored by liuzhe-lz, committed by GitHub

Port trial examples' config file to v2 (#3721)


Co-authored-by: liuzhe <zhe.liu@microsoft.com>
parent c4d449c5
authorName: default
experimentName: example_mnist_pytorch
searchSpaceFile: search_space.json
trialCommand: python3 mnist_tensorboard.py # NOTE: change "python3" to "python" if you are using Windows
trialGpuNumber: 0
trialConcurrency: 1
maxExecDuration: 1h
maxTrialNum: 10
#choice: local, remote, pai
trainingServicePlatform: local
searchSpacePath: search_space.json
#choice: true, false
useAnnotation: false
maxTrialNumber: 10
tuner:
#choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner, GPTuner
#SMAC (SMAC should be installed through nnictl)
builtinTunerName: TPE
name: TPE
classArgs:
#choice: maximize, minimize
optimize_mode: maximize
trial:
command: python3 mnist_tensorboard.py
codeDir: .
gpuNum: 0
trainingService:
platform: local
searchSpace:
momentum:
_type: uniform
_value: [0, 1]
hidden_size:
_type: choice
_value: [128, 256, 512, 1024]
batch_size:
_type: choice
_value: [16, 32, 64, 128]
lr:
_type: choice
_value: [0.0001, 0.001, 0.01, 0.1]
trainingService:
platform: local
trialCodeDirectory: .
trialCommand: python3 mnist.py
trialConcurrency: 1
trialGpuNumber: 0
tuner:
name: TPE
classArgs:
optimize_mode: maximize
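The searchSpace block above is what the tuner samples from; the script named in trialCommand consumes one sample per trial through NNI's trial API (nni.get_next_parameter / nni.report_*). A minimal sketch of that trial side is below — not the mnist.py shipped with the example, and train_one_epoch is a hypothetical stand-in for real training:

import random
import nni

def train_one_epoch(params):
    # Stand-in for real training so the sketch runs end to end; returns a fake accuracy.
    return random.random()

def main(params):
    # params is one point sampled from searchSpace, e.g.
    # {'momentum': 0.5, 'hidden_size': 256, 'batch_size': 32, 'lr': 0.001}
    accuracy = 0.0
    for epoch in range(10):
        accuracy = train_one_epoch(params)
        nni.report_intermediate_result(accuracy)   # per-epoch metric shown in the web UI
    nni.report_final_result(accuracy)              # the value TPE optimizes (optimize_mode: maximize)

if __name__ == '__main__':
    main(nni.get_next_parameter())                 # the hyperparameters assigned to this trial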
authorName: default
experimentName: example_mnist_pytorch
trialConcurrency: 1
maxExecDuration: 1h
maxTrialNum: 10
#choice: local, remote, pai
trainingServicePlatform: local
searchSpacePath: search_space.json
#choice: true, false
useAnnotation: false
tuner:
#choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner, GPTuner
#SMAC (SMAC should be installed through nnictl)
builtinTunerName: TPE
classArgs:
#choice: maximize, minimize
optimize_mode: maximize
trial:
command: python mnist.py
codeDir: .
gpuNum: 0
@@ -32,4 +32,4 @@ sharedStorage:
 # usermount means you have already mounted this storage on localMountPoint
 # nnimount means nni will try to mount this storage on localMountPoint
 # nomount means the storage will not be mounted on the local machine; this will only be supported for some storage types in the future
-localMounted: nnimount
\ No newline at end of file
+localMounted: nnimount
authorName: NNI Example
experimentName: MNIST TF v2.x
# This is the minimal config file for an NNI experiment.
# Use "nnictl create --config config.yml" to launch this experiment.
# Afterwards, you can check "config_detailed.yml" for more explanation.
searchSpaceFile: search_space.json
trialCommand: python3 mnist.py # NOTE: change "python3" to "python" if you are using Windows
trialGpuNumber: 0
trialConcurrency: 1
maxExecDuration: 1h
maxTrialNum: 10
trainingServicePlatform: local # choices: local, remote, pai
searchSpacePath: search_space.json
useAnnotation: false
tuner:
builtinTunerName: TPE # choices: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner,
# GPTuner, SMAC (SMAC should be installed through nnictl)
classArgs:
optimize_mode: maximize # choices: maximize, minimize
trial:
command: python3 mnist.py
codeDir: .
gpuNum: 0
name: TPE
classArgs:
optimize_mode: maximize
trainingService:
platform: local
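searchSpaceFile points to a JSON file that uses the same _type/_value schema as the inline searchSpace blocks in this commit; if you would rather keep the space in code, it can be generated from Python. The field names and values below mirror the detailed config further down and are only illustrative:

import json

# Same schema as the inline `searchSpace:` sections: each entry carries _type and _value.
search_space = {
    'dropout_rate':  {'_type': 'uniform', '_value': [0.5, 0.9]},
    'conv_size':     {'_type': 'choice',  '_value': [2, 3, 5, 7]},
    'hidden_size':   {'_type': 'choice',  '_value': [128, 512, 1024]},
    'batch_size':    {'_type': 'choice',  '_value': [16, 32]},
    'learning_rate': {'_type': 'choice',  '_value': [0.0001, 0.001, 0.01, 0.1]},
}

with open('search_space.json', 'w') as f:
    json.dump(search_space, f, indent=2)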
authorName: NNI Example
experimentName: MNIST TF v2.x with assessor
trialConcurrency: 1
maxExecDuration: 1h
maxTrialNum: 50
#choice: local, remote
trainingServicePlatform: local
searchSpacePath: search_space.json
#choice: true, false
useAnnotation: false
searchSpaceFile: search_space.json
trialCommand: python3 mnist.py
trialGpuNumber: 0
trialConcurrency: 2
maxTrialNumber: 50
tuner:
#choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner, GPTuner
#SMAC (SMAC should be installed through nnictl)
builtinTunerName: TPE
name: TPE
classArgs:
#choice: maximize, minimize
optimize_mode: maximize
assessor:
#choice: Medianstop, Curvefitting
builtinAssessorName: Curvefitting
assessor: # Specify the early-stopping algorithm
name: Curvefitting
classArgs:
epoch_num: 20
threshold: 0.9
trial:
command: python3 mnist.py
codeDir: .
gpuNum: 0
trainingService:
platform: local
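The Curvefitting assessor can only early-stop a trial if that trial reports a learning curve, so the trial code must call nni.report_intermediate_result once per epoch. A rough sketch of the expected reporting pattern (the per-epoch accuracy here is a placeholder, not the example's real training loop):

import nni

params = nni.get_next_parameter()
val_acc = 0.0
for epoch in range(20):                        # matches epoch_num: 20 in classArgs
    val_acc = min(0.99, 0.05 * (epoch + 1))    # placeholder for real per-epoch validation accuracy
    nni.report_intermediate_result(val_acc)    # the curve the assessor extrapolates from
nni.report_final_result(val_acc)               # trials judged unpromising are stopped before this line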
# This example shows more configurable fields than the minimal "config.yml"
# You can use "nnictl create --config config_detailed.yml" to launch this experiment.
# If you see an error message saying "port 8080 is used", use "nnictl stop --all" to stop previous experiments.
name: MNIST # An optional name to help you distinguish experiments.
# The hyper-parameter search space can be configured either here or in a separate file.
# "config.yml" shows how to specify a separate search space file.
# The common schema of search space is documented here:
# https://nni.readthedocs.io/en/stable/Tutorial/SearchSpaceSpec.html
searchSpace:
dropout_rate:
_type: uniform
_value: [0.5, 0.9]
conv_size:
_type: choice
_value: [2, 3, 5, 7]
hidden_size:
_type: choice
_value: [128, 512, 1024]
batch_size:
_type: choice
_value: [16, 32]
learning_rate:
_type: choice
_value: [0.0001, 0.001, 0.01, 0.1]
trialCommand: python3 mnist.py # The command to launch a trial. NOTE: change "python3" to "python" if you are using Windows.
trialCodeDirectory: . # The path of the trial code. By default it's ".", which means the same directory as this config file.
trialGpuNumber: 1 # How many GPUs should each trial use. CUDA is required when it's greater than zero.
trialConcurrency: 4 # Run 4 trials concurrently.
maxTrialNumber: 10 # Generate at most 10 trials.
maxExperimentDuration: 1h # Stop generating trials after 1 hour.
tuner: # Configure the tuning algorithm.
name: TPE # Supported algorithms: TPE, Random, Anneal, Evolution, GridSearch, GPTuner, PBTTuner, etc.
# Full list: https://nni.readthedocs.io/en/latest/Tuner/BuiltinTuner.html
classArgs: # Algorithm specific arguments. See the tuner's doc for details.
optimize_mode: maximize # "minimize" or "maximize"
# Configure the training platform.
# Supported platforms: local, remote, openpai, aml, kubeflow, kubernetes, adl.
# You can find config templates for some platforms in this directory, and for the others in the mnist-pytorch example.
trainingService:
platform: local
useActiveGpu: false # NOTE: Use "true" if you are using an OS with graphical interface (e.g. Windows 10, Ubuntu desktop)
# Reason and details: https://nni.readthedocs.io/en/latest/reference/experiment_config.html#useactivegpu
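With trialGpuNumber: 1 the local training service schedules trials onto free GPUs, and useActiveGpu decides whether GPUs already running a desktop session count as free. As far as I can tell the assignment reaches the trial via CUDA_VISIBLE_DEVICES (treat that as an assumption and see the linked reference); a trial can log what it actually received:

import os

# Expected to be set by NNI's local mode when trialGpuNumber > 0; empty means no GPU was assigned.
gpus = os.environ.get('CUDA_VISIBLE_DEVICES', '')
print('GPUs assigned to this trial:', gpus or 'none')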
searchSpaceFile: search_space.json
trialCommand: python3 mnist.py
trialGpuNumber: 0
trialConcurrency: 5
maxTrialNumber: 20
tuner:
name: TPE
classArgs:
optimize_mode: maximize
# For local, remote, openpai, and aml, NNI can use multiple training services at one time
trainingService:
- platform: local
- platform: remote
machineList:
- host: ${your server's IP or domain name}
user: ${your user name}
ssh_key_file: ~/.ssh/id_rsa
- platform: aml
dockerImage: msranni/nni
subscriptionId: ${your subscription ID}
resourceGroup: ${your resource group}
workspaceName: ${your workspace name}
computeTarget: ${your compute target}
authorName: default
experimentName: example_mnist
trialConcurrency: 1
maxExecDuration: 1h
maxTrialNum: 10
#choice: local, remote, pai
trainingServicePlatform: remote
searchSpacePath: search_space.json
#choice: true, false
useAnnotation: false
searchSpaceFile: search_space.json
trialCommand: python3 mnist.py
trialGpuNumber: 0
trialConcurrency: 4
maxTrialNumber: 20
tuner:
#choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner
#SMAC (SMAC should be installed through nnictl)
builtinTunerName: TPE
name: TPE
classArgs:
#choice: maximize, minimize
optimize_mode: maximize
trial:
command: python3 mnist.py
codeDir: .
gpuNum: 0
#machineList can be empty if the platform is local
machineList:
- ip: ${replace_to_your_remote_machine_ip}
username: ${replace_to_your_remote_machine_username}
sshKeyPath: ${replace_to_your_remote_machine_sshKeyPath}
# Below are examples of specifying the Python environment.
# pythonPath: /opt/python3.7/bin
# pythonPath: C:/Python37
# Below is an example of specifying the Python environment for Windows Anaconda users. Separate multiple paths with ';'.
# pythonPath: C:/Users/yourname/.conda/envs/myenv;C:/Users/yourname/.conda/envs/myenv/Scripts;C:/Users/yourname/.conda/envs/myenv/Library/bin
pythonPath: ${replace_to_python_environment_path_in_your_remote_machine}
trainingService:
platform: remote
machineList:
- host: ${your server's IP or domain name}
user: ${your user name}
ssh_key_file: ~/.ssh/id_rsa # We recommend public keys over passwords; they're more secure and convenient.
# You can specify more than one SSH server:
- host: 123.123.123.123
port: 10022
user: nniuser
password: 12345
pythonPath: /usr/bin # Other examples:
# /opt/python3.9/bin
# C:/Python39
# C:/Users/USERNAME/.conda/envs/ENVNAME;C:/Users/USERNAME/.conda/envs/ENVNAME/Scripts;C:/Users/USERNAME/.conda/envs/ENVNAME/Library/bin
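pythonPath exists because remote trials run under whatever interpreter the SSH login environment resolves, a common source of "module not found" surprises; as I understand it, the configured directories are simply prepended to PATH for the trial. Logging the interpreter from inside a trial is a quick sanity check (illustrative, not part of the example):

import os
import sys

# Print which interpreter and PATH a remote trial actually uses.
print('python executable:', sys.executable)
print('PATH head:', os.environ.get('PATH', '').split(os.pathsep)[:3])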
searchSpace:
dropout_rate:
_type: uniform
_value: [0.5, 0.9]
conv_size:
_type: choice
_value: [2, 3, 5, 7]
hidden_size:
_type: choice
_value: [128, 512, 1024]
batch_size:
_type: choice
_value: [16, 32]
learning_rate:
_type: choice
_value: [0.0001, 0.001, 0.01, 0.1]
trainingService:
platform: local
trialCodeDirectory: .
trialCommand: python3 mnist.py
trialConcurrency: 1
trialGpuNumber: 0
tuner:
name: TPE
classArgs:
optimize_mode: maximize
authorName: NNI Example
experimentName: MNIST TF v2.x
trialConcurrency: 1
maxExecDuration: 1h
maxTrialNum: 10
#choice: local, remote, pai
trainingServicePlatform: local
searchSpacePath: search_space.json
#choice: true, false
useAnnotation: false
tuner:
#choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner, GPTuner
#SMAC (SMAC should be installed through nnictl)
builtinTunerName: TPE
classArgs:
#choice: maximize, minimize
optimize_mode: maximize
trial:
command: python mnist.py
codeDir: .
gpuNum: 0
authorName: default
experimentName: example_FashionMNIST-network-morphism
trialCommand: python3 FashionMNIST_keras.py
trialGpuNumber: 1
trialConcurrency: 4
maxExecDuration: 48h
maxTrialNum: 200
#choice: local, remote, pai
trainingServicePlatform: local
#searchSpacePath: search_space.json
#choice: true, false
useAnnotation: false
maxExperimentDuration: 48h
maxTrialNumber: 200
searchSpace: {} # search space of NetworkMorphism is provided via classArgs
tuner:
#choice: TPE, Random, Anneal, Evolution, BatchTuner, NetworkMorphism
#SMAC (SMAC should be installed through nnictl)
builtinTunerName: NetworkMorphism
name: NetworkMorphism
classArgs:
#choice: maximize, minimize
optimize_mode: maximize
#for now, this tuner only supports cv domain
task: cv
#input image width
input_width: 28
#input image channel
input_channel: 1
#number of classes
n_output_node: 10
trial:
command: python3 FashionMNIST_keras.py
codeDir: .
gpuNum: 1
optimize_mode: maximize # maximize or minimize
task: cv # for now, this tuner only supports cv domain
input_width: 28 # input image width
input_channel: 1 # input image channel
n_output_node: 10 # number of classes
trainingService:
platform: local
useActiveGpu: false # NOTE: Use "true" if you are using an OS with graphical interface (e.g. Windows 10, Ubuntu desktop)
# Check the doc for details: https://nni.readthedocs.io/en/latest/reference/experiment_config.html#useactivegpu
authorName: default
experimentName: example_FashionMNIST-network-morphism
trialConcurrency: 1
maxExecDuration: 24h
maxTrialNum: 10
#choice: local, remote, pai
trainingServicePlatform: pai
#choice: true, false
useAnnotation: false
tuner:
#choice: TPE, Random, Anneal, Evolution, BatchTuner, NetworkMorphism
#SMAC (SMAC should be installed through nnictl)
builtinTunerName: NetworkMorphism
classArgs:
#choice: maximize, minimize
optimize_mode: maximize
# for now, this tuner only supports cv domain
task: cv
#input image width
input_width: 28
#input image channel
input_channel: 1
#number of classes
n_output_node: 10
trial:
command: python3 FashionMNIST_keras.py
codeDir: .
gpuNum: 1
cpuNum: 1
memoryMB: 8196
#The docker image to run nni job on pai
image: msranni/nni:latest
nniManagerNFSMountPath: {replace_to_your_nfs_mount_path}
containerNFSMountPath: {replace_to_your_container_mount_path}
paiStorageConfigName: {replace_to_your_storage_config_name}
paiConfig:
#The username to login pai
userName: username
#The token to login pai
token: token
#The host of restful server of pai
host: 10.10.10.10
\ No newline at end of file
authorName: default
experimentName: example_cifar10-network-morphism
trialCommand: python3 cifar10_keras.py
trialGpuNumber: 1
trialConcurrency: 4
maxExecDuration: 48h
maxTrialNum: 200
#choice: local, remote, pai
trainingServicePlatform: local
#searchSpacePath: search_space.json
#choice: true, false
useAnnotation: false
maxExperimentDuration: 48h
maxTrialNumber: 200
searchSpace: {} # search space of NetworkMorphism is provided via classArgs
tuner:
#choice: TPE, Random, Anneal, Evolution, BatchTuner, NetworkMorphism
#SMAC (SMAC should be installed through nnictl)
builtinTunerName: NetworkMorphism
name: NetworkMorphism
classArgs:
#choice: maximize, minimize
optimize_mode: maximize
#for now, this tuner only supports cv domain
task: cv
#input image width
input_width: 32
#input image channel
input_channel: 3
#number of classes
n_output_node: 10
trial:
command: python3 cifar10_keras.py
codeDir: .
gpuNum: 1
optimize_mode: maximize # maximize or minimize
task: cv # for now, this tuner only supports cv domain
input_width: 32 # input image width
input_channel: 3 # input image channel
n_output_node: 10 # number of classes
trainingService:
platform: local
useActiveGpu: false # NOTE: Use "true" if you are using an OS with graphical interface (e.g. Windows 10, Ubuntu desktop)
# Check the doc for details: https://nni.readthedocs.io/en/latest/reference/experiment_config.html#useactivegpu
authorName: default
experimentName: example_cifar10-network-morphism
trialConcurrency: 1
maxExecDuration: 24h
maxTrialNum: 10
#choice: local, remote, pai
trainingServicePlatform: pai
#choice: true, false
useAnnotation: false
tuner:
#choice: TPE, Random, Anneal, Evolution, BatchTuner, NetworkMorphism
#SMAC (SMAC should be installed through nnictl)
builtinTunerName: NetworkMorphism
classArgs:
#choice: maximize, minimize
optimize_mode: maximize
# for now, this tuner only supports cv domain
task: cv
#input image width
input_width: 32
#input image channel
input_channel: 3
#number of classes
n_output_node: 10
trial:
command: python3 cifar10_keras.py
codeDir: .
gpuNum: 1
cpuNum: 1
memoryMB: 8196
#The docker image to run nni job on pai
image: msranni/nni:latest
nniManagerNFSMountPath: {replace_to_your_nfs_mount_path}
containerNFSMountPath: {replace_to_your_container_mount_path}
paiStorageConfigName: {replace_to_your_storage_config_name}
paiConfig:
#The username to login pai
userName: username
#The token to login pai
token: token
#The host of restful server of pai
host: 10.10.10.10
\ No newline at end of file
authorName: default
experimentName: example_sklearn-classification
searchSpaceFile: search_space.json
trialCommand: python3 main.py
trialConcurrency: 1
maxExecDuration: 1h
maxTrialNum: 100
#choice: local, remote
trainingServicePlatform: local
searchSpacePath: search_space.json
#choice: true, false
useAnnotation: false
maxTrialNumber: 100
maxExperimentDuration: 1h
tuner:
#choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner
builtinTunerName: TPE
name: TPE
classArgs:
#choice: maximize, minimize
optimize_mode: maximize
trial:
command: python3 main.py
codeDir: .
gpuNum: 0
\ No newline at end of file
trainingService: # For other platforms, check mnist-pytorch example
platform: local
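The sklearn trial is ordinary scikit-learn code wrapped in the same get/report calls as the MNIST examples. A hedged sketch follows — the digits dataset, SVC, and the parameter names are illustrative choices, not necessarily what the shipped main.py and search_space.json use:

import nni
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC

params = nni.get_next_parameter()              # e.g. {'C': 1.0, 'kernel': 'rbf', 'gamma': 0.01}
X, y = load_digits(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

model = SVC(C=params.get('C', 1.0),
            kernel=params.get('kernel', 'rbf'),
            gamma=params.get('gamma', 'scale'))
model.fit(X_train, y_train)
nni.report_final_result(model.score(X_test, y_test))   # test accuracy; TPE maximizes it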
authorName: default
experimentName: example_sklearn
trialConcurrency: 1
maxExecDuration: 1h
maxTrialNum: 100
#choice: local, remote, pai
trainingServicePlatform: pai
searchSpacePath: search_space.json
#choice: true, false
useAnnotation: false
tuner:
#choice: TPE, Random, Anneal, Evolution, BatchTuner,MetisTuner
#SMAC (SMAC should be installed through nnictl)
builtinTunerName: TPE
classArgs:
#choice: maximize, minimize
optimize_mode: maximize
trial:
command: python3 main.py
codeDir: .
gpuNum: 0
cpuNum: 1
memoryMB: 8196
#The docker image to run nni job on pai
image: msranni/nni:latest
nniManagerNFSMountPath: {replace_to_your_nfs_mount_path}
containerNFSMountPath: {replace_to_your_container_mount_path}
paiStorageConfigName: {replace_to_your_storage_config_name}
paiConfig:
#The username to login pai
userName: username
#The token to login pai
token: token
#The host of restful server of pai
host: 10.10.10.10
\ No newline at end of file
authorName: default
experimentName: example_sklearn-regression
searchSpaceFile: search_space.json
trialCommand: python3 main.py
trialConcurrency: 1
maxExecDuration: 1h
maxTrialNum: 30
#choice: local, remote
trainingServicePlatform: local
searchSpacePath: search_space.json
#choice: true, false
useAnnotation: false
maxTrialNumber: 30
maxExperimentDuration: 1h
tuner:
#choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner
builtinTunerName: TPE
name: TPE
classArgs:
#choice: maximize, minimize
optimize_mode: maximize
trial:
command: python3 main.py
codeDir: .
gpuNum: 0
\ No newline at end of file
trainingService: # For other platforms, check mnist-pytorch example
platform: local
authorName: default
experimentName: example_sklearn
trialConcurrency: 1
maxExecDuration: 1h
maxTrialNum: 100
#choice: local, remote, pai
trainingServicePlatform: pai
searchSpacePath: search_space.json
#choice: true, false
useAnnotation: false
tuner:
#choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner
#SMAC (SMAC should be installed through nnictl)
builtinTunerName: TPE
classArgs:
#choice: maximize, minimize
optimize_mode: maximize
trial:
command: python3 main.py
codeDir: .
gpuNum: 0
cpuNum: 1
memoryMB: 8196
#The docker image to run nni job on pai
image: msranni/nni:latest
nniManagerNFSMountPath: {replace_to_your_nfs_mount_path}
containerNFSMountPath: {replace_to_your_container_mount_path}
paiStorageConfigName: {replace_to_your_storage_config_name}
paiConfig:
#The username to login pai
userName: username
#The token to login pai
token: token
#The host of restful server of pai
host: 10.10.10.10
\ No newline at end of file
@@ -124,7 +124,7 @@ class ConfigBase:
 type_name = str(field.type).replace('typing.', '')
 optional = any([
     type_name.startswith('Optional['),
-    type_name.startswith('Union[') and 'NoneType' in type_name,
+    type_name.startswith('Union[') and 'None' in type_name,
     type_name == 'Any'
 ])
 if value is None:
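The change above only touches the optionality check ('NoneType' -> 'None'), and since 'None' is a substring of 'NoneType' the new test is strictly broader; presumably it also catches unions seen as literal strings such as 'Union[int, None]' (e.g. postponed/string annotations), not just the NoneType spelling that typing's repr produces. A small illustration of the strings being matched (reprs from CPython's typing module; exact formatting can vary between versions):

from typing import Optional, Union

print(str(Optional[int]))           # typing.Optional[int]             -> caught by startswith('Optional[')
print(str(Union[int, str, None]))   # typing.Union[int, str, NoneType] -> contains 'NoneType' (and 'None')
print('Union[int, None]')           # an annotation kept as a string   -> contains 'None' but not 'NoneType'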