OpenDAS / nni · Commit e5d61809 (unverified)
Authored Jun 25, 2021 by Yuge Zhang; committed via GitHub on Jun 25, 2021.
Integrate coverage report into CI (#3854)
Parent: 51c6afde

The commit changes 40 files in total; this page shows 20 changed files with 41 additions and 26 deletions (+41 / -26).

Changed files on this page:
.gitignore (+6 / -0)
dependencies/develop.txt (+2 / -0)
dependencies/recommended.txt (+1 / -0)
nni/algorithms/__init__.py (+0 / -0)
nni/algorithms/compression/__init__.py (+0 / -0)
nni/algorithms/compression/pytorch/__init__.py (+0 / -0)
nni/algorithms/compression/pytorch/auto_compress/experiment.py (+2 / -3)
nni/algorithms/compression/pytorch/auto_compress/utils.py (+3 / -2)
nni/algorithms/compression/pytorch/pruning/iterative_pruner.py (+3 / -2)
nni/algorithms/compression/pytorch/pruning/sensitivity_pruner.py (+2 / -1)
nni/algorithms/compression/pytorch/quantization/quantizers.py (+3 / -2)
nni/algorithms/feature_engineering/__init__.py (+0 / -0)
nni/algorithms/hpo/__init__.py (+0 / -0)
nni/algorithms/hpo/dngo_tuner.py (+3 / -2)
nni/algorithms/hpo/hyperband_advisor.py (+6 / -3)
nni/algorithms/hpo/metis_tuner/metis_tuner.py (+2 / -2)
nni/algorithms/hpo/networkmorphism_tuner/networkmorphism_tuner.py (+1 / -1)
nni/algorithms/nas/__init__.py (+0 / -0)
nni/algorithms/nas/pytorch/__init__.py (+0 / -0)
nni/algorithms/nas/pytorch/cream/trainer.py (+7 / -8)
.gitignore (+6 / -0)

@@ -32,6 +32,12 @@ lib-cov
 # Coverage directory used by tools like istanbul
 coverage
+junit/
+coverage.xml
+test-*.xml
+.coverage.*
+htmlcov/
+.coverage

 # nyc test coverage
 .nyc_output
dependencies/develop.txt (+2 / -0)

@@ -6,5 +6,7 @@ sphinx-rtd-theme
 sphinxcontrib-websupport
 nbsphinx
 pytest
+pytest-cov
+pytest-azurepipelines
 coverage
 ipython
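
These two new dependencies are what wire coverage into the test run: pytest-cov collects coverage data while the unit tests execute, and pytest-azurepipelines publishes the results to the Azure DevOps build page. As a minimal sketch of how such a run could be driven (the test path and report locations are assumptions, not the commit's actual CI configuration):

# Hedged sketch: run the test suite under coverage so that coverage.xml and
# junit-style results appear; these are the same artifacts the .gitignore
# hunk above starts excluding. Paths here are illustrative assumptions.
import pytest

exit_code = pytest.main([
    "--cov=nni",                          # pytest-cov: measure the nni package
    "--cov-report=xml",                   # writes coverage.xml
    "--junitxml=junit/test-results.xml",  # junit-style report under junit/
    "test/ut",                            # hypothetical unit-test directory
])
raise SystemExit(int(exit_code))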
dependencies/recommended.txt (+1 / -0)

@@ -2,6 +2,7 @@
 -f https://download.pytorch.org/whl/torch_stable.html
 tensorflow
+keras
 torch == 1.6.0+cpu ; sys_platform != "darwin"
 torch == 1.6.0 ; sys_platform == "darwin"
 torchvision == 0.7.0+cpu ; sys_platform != "darwin"
nni/algorithms/__init__.py: new empty file (0 → 100644)
nni/algorithms/compression/__init__.py: new empty file (0 → 100644)
nni/algorithms/compression/pytorch/__init__.py: new empty file (0 → 100644)
nni/algorithms/compression/pytorch/auto_compress/experiment.py (+2 / -3)

@@ -5,8 +5,6 @@ import inspect
 from pathlib import Path, PurePath
 from typing import overload, Union, List
-
-from numpy import tri
 from nni.experiment import Experiment, ExperimentConfig
 from nni.algorithms.compression.pytorch.auto_compress.interface import AbstractAutoCompressionModule

@@ -62,7 +60,8 @@ class AutoCompressionExperiment(Experiment):
     def start(self, port: int, debug: bool) -> None:
         trial_code_directory = str(PurePath(Path(self.config.trial_code_directory).absolute())) + '/'
-        assert self.module_file_path.startswith(trial_code_directory), 'The file path of the user-provided module should under trial_code_directory.'
+        assert self.module_file_path.startswith(trial_code_directory), \
+            'The file path of the user-provided module should under trial_code_directory.'
         relative_module_path = self.module_file_path.split(trial_code_directory)[1]
         # only support linux, need refactor?
         command = 'python3 -m nni.algorithms.compression.pytorch.auto_compress.trial_entry --module_file_name {} --module_class_name {}'
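
The assert that gets re-wrapped here enforces that the user's module file resides under trial_code_directory once both paths are normalized to absolute form. The same pathlib pattern in isolation (file names are hypothetical):

# Standalone illustration of the path check above; not NNI code.
from pathlib import Path, PurePath

trial_code_directory = str(PurePath(Path('examples/trial').absolute())) + '/'
module_file_path = str(PurePath(Path('examples/trial/auto_module.py').absolute()))

assert module_file_path.startswith(trial_code_directory)
relative_module_path = module_file_path.split(trial_code_directory)[1]
print(relative_module_path)  # -> 'auto_module.py'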
nni/algorithms/compression/pytorch/auto_compress/utils.py (+3 / -2)

@@ -31,10 +31,11 @@ class AutoCompressionSearchSpaceGenerator:
         pruner_name
             Supported pruner name: 'level', 'slim', 'l1', 'l2', 'fpgm', 'taylorfo', 'apoz', 'mean_activation'.
         config_list
-            Except 'op_types' and 'op_names', other config value can be written as `{'_type': ..., '_value': ...}`.
+            Except 'op_types' and 'op_names', other config value can be written as
+            ``{'_type': ..., '_value': ...}``.
         **algo_kwargs
             The additional pruner parameters except 'model', 'config_list', 'optimizer', 'trainer', 'criterion'.
-            i.e., you can set `statistics_batch_num={'_type': 'choice', '_value': [1, 2, 3]}` in TaylorFOWeightFilterPruner or just `statistics_batch_num=1`.
+            i.e., you can set ``statistics_batch_num={'_type': 'choice', '_value': [1, 2, 3]}``
+            in TaylorFOWeightFilterPruner or just ``statistics_batch_num=1``.
         """
         sub_search_space = {'_name': pruner_name}
         for config in config_list:
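
The reworked docstring belongs to the generator method that registers a pruner's search space. A hedged usage sketch follows; the method names add_pruner_config and dump are assumed from context, since this diff only shows the parameter documentation:

# Hedged usage sketch of AutoCompressionSearchSpaceGenerator.
from nni.algorithms.compression.pytorch.auto_compress.utils import \
    AutoCompressionSearchSpaceGenerator

generator = AutoCompressionSearchSpaceGenerator()
generator.add_pruner_config(          # assumed method name
    'taylorfo',
    [{
        # op_types / op_names are passed through verbatim ...
        'op_types': ['Conv2d'],
        # ... while every other value may be a search-space expression:
        'sparsity': {'_type': 'uniform', '_value': [0.1, 0.9]},
    }],
    # extra pruner argument, either searched or fixed (e.g. just 1):
    statistics_batch_num={'_type': 'choice', '_value': [1, 2, 3]},
)
search_space = generator.dump()       # assumed export method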
nni/algorithms/compression/pytorch/pruning/iterative_pruner.py (+3 / -2)

@@ -84,7 +84,7 @@ class IterativePruner(DependencyAwarePruner):
             self._trainer(self.bound_model, optimizer=self.optimizer, criterion=self._criterion, epoch=epoch)
             # NOTE: workaround for statistics_batch_num bigger than max batch number in one epoch, need refactor
             if hasattr(self.masker, 'statistics_batch_num') and hasattr(self, 'iterations'):
-                if self.iterations < self.masker.statistics_batch_num:
+                if self.iterations < self.masker.statistics_batch_num:    # pylint: disable=access-member-before-definition
                     self.iterations = self.masker.statistics_batch_num
         self.update_mask()
         self.bound_model.train(training)

@@ -118,7 +118,8 @@ class AGPPruner(IterativePruner):
         choose from `['level', 'slim', 'l1', 'l2', 'fpgm', 'taylorfo', 'apoz', 'mean_activation']`, by default `level`
     """
-    def __init__(self, model, config_list, optimizer, trainer, criterion, num_iterations=10, epochs_per_iteration=1, pruning_algorithm='level'):
+    def __init__(self, model, config_list, optimizer, trainer, criterion,
+                 num_iterations=10, epochs_per_iteration=1, pruning_algorithm='level'):
         super().__init__(model, config_list, optimizer=optimizer, trainer=trainer, criterion=criterion,
                          num_iterations=num_iterations, epochs_per_iteration=epochs_per_iteration)
         assert isinstance(optimizer, torch.optim.Optimizer), "AGP pruner is an iterative pruner, please pass optimizer of the model to it"
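
The assert here makes AGPPruner's contract explicit: an iterative pruner needs the model's real optimizer, not None. A minimal construction sketch consistent with the signature above (the toy model, no-op trainer, and import path are illustrative assumptions):

# Hedged sketch of constructing AGPPruner per the signature in this diff.
import torch
import torch.nn as nn
from nni.algorithms.compression.pytorch.pruning import AGPPruner  # assumed import path

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Flatten())
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)  # a real torch.optim.Optimizer
criterion = nn.CrossEntropyLoss()

def trainer(model, optimizer, criterion, epoch):
    """Placeholder one-epoch training loop supplied by the user."""

config_list = [{'sparsity': 0.5, 'op_types': ['default']}]
pruner = AGPPruner(model, config_list, optimizer, trainer, criterion,
                   num_iterations=10, epochs_per_iteration=1,
                   pruning_algorithm='level')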
nni/algorithms/compression/pytorch/pruning/sensitivity_pruner.py (+2 / -1)

@@ -10,9 +10,10 @@ import torch
 from schema import And, Optional
 from nni.compression.pytorch.compressor import Pruner
 from nni.compression.pytorch.utils.config_validation import CompressorSchema
-from .constants_pruner import PRUNER_DICT
 from nni.compression.pytorch.utils.sensitivity_analysis import SensitivityAnalysis
+
+from .constants_pruner import PRUNER_DICT

 MAX_PRUNE_RATIO_PER_ITER = 0.95
nni/algorithms/compression/pytorch/quantization/quantizers.py (+3 / -2)

@@ -245,7 +245,7 @@ class QAT_Quantizer(Quantizer):
     def quantize_weight(self, wrapper, **kwargs):
         config = wrapper.config
         module = wrapper.module
-        input = kwargs['input_tensor']
+        input = kwargs['input_tensor']    # pylint: disable=redefined-builtin
         weight = copy.deepcopy(wrapper.module.old_weight.data)
         weight_bits = get_bits_length(config, 'weight')
         quant_start_step = config.get('quant_start_step', 0)

@@ -304,7 +304,8 @@ class QAT_Quantizer(Quantizer):
         module.tracked_max_activation = update_ema(module.tracked_max_activation, current_max,
                                                    module.ema_decay)
-        module.scale, module.zero_point = update_quantization_param(output_bits, module.tracked_min_activation, module.tracked_max_activation)
+        module.scale, module.zero_point = update_quantization_param(
+            output_bits, module.tracked_min_activation, module.tracked_max_activation)
         out = self._quantize(output_bits, module, output)
         out = self._dequantize(module, out)
         return out
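
For context, the re-wrapped calls maintain exponential moving averages of the observed activation range, from which scale and zero point are recomputed each step. A hedged sketch of the recurrence that update_ema plausibly implements (the exact formula is not shown in this diff):

# Assumed form of the EMA update; only the call sites appear in the diff.
def update_ema_sketch(tracked, current, decay):
    """Standard EMA recurrence: new = decay * tracked + (1 - decay) * current."""
    return decay * tracked + (1.0 - decay) * current

# e.g. tracked_max_activation drifts slowly toward the batch maximum:
print(update_ema_sketch(1.0, 3.0, decay=0.9))  # 1.2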
nni/algorithms/feature_engineering/__init__.py: new empty file (0 → 100644)
nni/algorithms/hpo/__init__.py: new empty file (0 → 100644)
nni/algorithms/hpo/dngo_tuner.py (+3 / -2)

@@ -2,11 +2,12 @@ import logging
 import numpy as np
 import torch
-from pybnn import DNGO
-from torch.distributions import Normal
 import nni.parameter_expressions as parameter_expressions
 from nni import ClassArgsValidator
 from nni.tuner import Tuner
+from pybnn import DNGO
+from torch.distributions import Normal
+
 _logger = logging.getLogger(__name__)
nni/algorithms/hpo/hyperband_advisor.py (+6 / -3)

@@ -265,8 +265,10 @@ class HyperbandClassArgsValidator(ClassArgsValidator):
         }).validate(kwargs)

 class Hyperband(MsgDispatcherBase):
-    """Hyperband inherit from MsgDispatcherBase rather than Tuner, because it integrates both tuner's functions and assessor's functions.
-    This is an implementation that could fully leverage available resources or follow the algorithm process, i.e., high parallelism or serial.
+    """
+    Hyperband inherit from MsgDispatcherBase rather than Tuner, because it integrates both tuner's functions and assessor's functions.
+    This is an implementation that could fully leverage available resources or follow the algorithm process,
+    i.e., high parallelism or serial.
     A single execution of Hyperband takes a finite budget of (s_max + 1)B.

     Parameters

@@ -346,7 +348,8 @@ class Hyperband(MsgDispatcherBase):
         self.curr_hb += 1
         _logger.debug('create a new bracket, self.curr_hb=%d, self.curr_s=%d', self.curr_hb, self.curr_s)
         self.curr_bracket_id = '{}-{}'.format(self.curr_hb, self.curr_s)
-        self.brackets[self.curr_bracket_id] = Bracket(self.curr_bracket_id, self.curr_s, self.s_max, self.eta, self.R, self.optimize_mode)
+        self.brackets[self.curr_bracket_id] = Bracket(
+            self.curr_bracket_id, self.curr_s, self.s_max, self.eta, self.R, self.optimize_mode)
         next_n, next_r = self.brackets[self.curr_bracket_id].get_n_r()
         _logger.debug('new bracket, next_n=%d, next_r=%d', next_n, next_r)
         assert self.searchspace_json is not None and self.random_state is not None
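
The docstring's budget claim can be made concrete. In Hyperband, s_max = floor(log_eta(R)), and each of the s_max + 1 brackets is allotted B = (s_max + 1) * R resources, so one full run costs (s_max + 1) * B. A worked example with assumed illustrative settings R = 60, eta = 3:

# Worked arithmetic for the "(s_max + 1)B" statement in the docstring above.
import math

R, eta = 60, 3                         # illustrative settings, not NNI defaults
s_max = math.floor(math.log(R, eta))   # 3
B = (s_max + 1) * R                    # budget per bracket: 240
total = (s_max + 1) * B                # finite budget of one Hyperband run: 960
print(s_max, B, total)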
nni/algorithms/hpo/metis_tuner/metis_tuner.py (+2 / -2)

@@ -15,6 +15,8 @@ import numpy as np
 from schema import Schema, Optional
 from nni import ClassArgsValidator
+from nni.tuner import Tuner
+from nni.utils import OptimizeMode, extract_scalar_reward
 from . import lib_constraint_summation
 from . import lib_data
 from .Regression_GMM import CreateModel as gmm_create_model

@@ -23,8 +25,6 @@ from .Regression_GP import CreateModel as gp_create_model
 from .Regression_GP import OutlierDetection as gp_outlier_detection
 from .Regression_GP import Prediction as gp_prediction
 from .Regression_GP import Selection as gp_selection
-from nni.tuner import Tuner
-from nni.utils import OptimizeMode, extract_scalar_reward

 logger = logging.getLogger("Metis_Tuner_AutoML")
nni/algorithms/hpo/networkmorphism_tuner/networkmorphism_tuner.py (+1 / -1)

@@ -8,13 +8,13 @@ networkmorphsim_tuner.py
 import logging
 import os
 from schema import Optional, Schema
+from nni import ClassArgsValidator
 from nni.tuner import Tuner
 from nni.utils import OptimizeMode, extract_scalar_reward
 from .bayesian import BayesianOptimizer
 from .nn import CnnGenerator, MlpGenerator
 from .utils import Constant
 from .graph import graph_to_json, json_to_graph
-from nni import ClassArgsValidator

 logger = logging.getLogger("NetworkMorphism_AutoML")
nni/algorithms/nas/__init__.py: new empty file (0 → 100644)
nni/algorithms/nas/pytorch/__init__.py: new empty file (0 → 100644)
nni/algorithms/nas/pytorch/cream/trainer.py (+7 / -8)

 # Copyright (c) Microsoft Corporation.
 # Licensed under the MIT license.

-import os
-import torch
 import logging
 from copy import deepcopy

+import torch
+
 from nni.nas.pytorch.trainer import Trainer
 from nni.nas.pytorch.utils import AverageMeterGroup

@@ -209,8 +208,8 @@ class CreamSupernetTrainer(Trainer):
         return g * optimizer.param_groups[-1]['lr'] + w

     # split training images into several slices
-    def _get_minibatch_input(self, input):
+    def _get_minibatch_input(self, input):    # pylint: disable=redefined-builtin
         slice = self.slices
-        # pylint: disable=redefined-builtin
         x = deepcopy(input[:slice].clone().detach())
         return x

@@ -259,8 +258,8 @@ class CreamSupernetTrainer(Trainer):
         return torch.mean(torch.sum(-soft_target * logsoftmax(pred), 1))

     # forward validation data
-    def _forward_validation(self, input, target):
+    def _forward_validation(self, input, target):    # pylint: disable=redefined-builtin
         slice = self.slices
-        # pylint: disable=redefined-builtin
         x = input[slice:slice * 2].clone()
         self._replace_mutator_cand(self.current_student_arch)

@@ -281,7 +280,7 @@ class CreamSupernetTrainer(Trainer):
         self.mutator._cache = cand

     # update meta matching networks
-    def _run_update(self, input, target, batch_idx):
+    def _run_update(self, input, target, batch_idx):    # pylint: disable=redefined-builtin
         if self._isUpdateMeta(batch_idx):
             x = self._get_minibatch_input(input)
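
The two helpers above carve one incoming batch by index: the first self.slices samples feed the meta update, and the next self.slices samples serve as validation. A standalone illustration of that slicing (tensor shape and slice count are made up for the example):

# Illustration of the batch slicing used by the trainer methods above.
import torch

batch = torch.randn(8, 3, 32, 32)   # hypothetical input batch
slices = 2                          # stands in for self.slices

minibatch_x = batch[:slices].clone().detach()    # as in _get_minibatch_input
validation_x = batch[slices:slices * 2].clone()  # as in _forward_validation
print(minibatch_x.shape, validation_x.shape)     # both torch.Size([2, 3, 32, 32])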