Unverified commit 7c4b8c0d, authored by liuzhe-lz, committed by GitHub

Make pylint happy (#1649)

Update the Python SDK and nni_annotation to pass pylint rules
parent 22316800
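The recurring fix in this diff is pylint's lazy-logging family of checks (W1201 logging-not-lazy, W1202 logging-format-interpolation). A minimal before/after sketch of the pattern, not taken from the commit itself:

```python
import logging

logger = logging.getLogger(__name__)
value = 42

# Flagged: the message is formatted eagerly, even when DEBUG is disabled.
logger.debug('value is [%s]' % value)         # W1201 logging-not-lazy
logger.debug('value is [{}]'.format(value))   # W1202 logging-format-interpolation

# Preferred: pass arguments separately; formatting is deferred until the
# logging framework knows the record will actually be emitted.
logger.debug('value is [%s]', value)
```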
@@ -21,7 +21,6 @@
 import logging
 import threading
 from enum import Enum
-from .common import multi_thread_enabled

 class CommandType(Enum):
@@ -49,7 +48,6 @@ try:
     _out_file = open(4, 'wb')
 except OSError:
     _msg = 'IPC pipeline not exists, maybe you are importing tuner/assessor from trial code?'
-    import logging
     logging.getLogger(__name__).warning(_msg)
@@ -64,7 +62,7 @@ def send(command, data):
         data = data.encode('utf8')
         assert len(data) < 1000000, 'Command too long'
         msg = b'%b%06d%b' % (command.value, len(data), data)
-        logging.getLogger(__name__).debug('Sending command, data: [%s]' % msg)
+        logging.getLogger(__name__).debug('Sending command, data: [%s]', msg)
         _out_file.write(msg)
         _out_file.flush()
     finally:
@@ -76,7 +74,7 @@ def receive():
     Returns a tuple of command (CommandType) and payload (str)
     """
     header = _in_file.read(8)
-    logging.getLogger(__name__).debug('Received command, header: [%s]' % header)
+    logging.getLogger(__name__).debug('Received command, header: [%s]', header)
     if header is None or len(header) < 8:
         # Pipe EOF encountered
         logging.getLogger(__name__).debug('Pipe EOF encountered')
@@ -85,5 +83,5 @@ def receive():
     data = _in_file.read(length)
     command = CommandType(header[:2])
     data = data.decode('utf8')
-    logging.getLogger(__name__).debug('Received command, data: [%s]' % data)
+    logging.getLogger(__name__).debug('Received command, data: [%s]', data)
     return command, data
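For orientation, the wire format used by `send()` and `receive()` above is a fixed 8-byte header (a 2-byte command code plus a 6-digit, zero-padded payload length) followed by a UTF-8 payload. A standalone sketch of that framing; the `b'ME'` code is made up for illustration:

```python
def encode(command_value: bytes, payload: str) -> bytes:
    # 2-byte command code + 6-digit length + payload, as in send() above
    data = payload.encode('utf8')
    assert len(data) < 1000000, 'Command too long'
    return b'%b%06d%b' % (command_value, len(data), data)

def decode(msg: bytes):
    # Mirror of receive(): split the 8-byte header, then take the payload
    command_value, length = msg[:2], int(msg[2:8])
    return command_value, msg[8:8 + length].decode('utf8')

assert encode(b'ME', 'hello') == b'ME000005hello'   # b'ME' is a hypothetical code
assert decode(b'ME000005hello') == (b'ME', 'hello')
```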
@@ -21,6 +21,7 @@
 import os

+
 class Recoverable:
     def load_checkpoint(self):
         pass
@@ -31,4 +32,4 @@ class Recoverable:
         ckp_path = os.getenv('NNI_CHECKPOINT_DIRECTORY')
         if ckp_path is not None and os.path.isdir(ckp_path):
             return ckp_path
-        return None
\ No newline at end of file
+        return None
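`Recoverable` resolves its checkpoint directory from the `NNI_CHECKPOINT_DIRECTORY` environment variable. A hedged sketch of how a subclass might build on it; the `state.json` file name and the saved payload are illustrative only:

```python
import json
import os

class CheckpointedTuner(Recoverable):        # Recoverable as defined above
    def save_checkpoint(self):
        path = self.get_checkpoint_path()    # None unless the env var points to a directory
        if path is not None:
            with open(os.path.join(path, 'state.json'), 'w') as f:
                json.dump({'step': 0}, f)    # persist real tuner state here
```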
@@ -25,15 +25,18 @@ import logging
 import sys

 import numpy as np
-from ConfigSpaceNNI import Configuration
-
-from nni.tuner import Tuner
-from nni.utils import OptimizeMode, extract_scalar_reward

 from smac.facade.epils_facade import EPILS
 from smac.facade.roar_facade import ROAR
 from smac.facade.smac_facade import SMAC
 from smac.scenario.scenario import Scenario
 from smac.utils.io.cmd_reader import CMDReader
+
+from ConfigSpaceNNI import Configuration
+
+from nni.tuner import Tuner
+from nni.utils import OptimizeMode, extract_scalar_reward

 from .convert_ss_to_scenario import generate_scenario
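The import shuffle above addresses pylint's wrong-import-order check (C0411): standard library first, then third-party packages, then first-party and relative imports. Schematically (module names below are illustrative, not part of the commit):

```python
import logging                       # 1. standard library
import sys

import numpy as np                   # 2. third-party packages

from nni.tuner import Tuner          # 3. first-party package
from .helpers import make_scenario   # 4. local relative import (hypothetical name)
```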
@@ -72,11 +75,9 @@ class SMACTuner(Tuner):
         root_logger = logging.getLogger()
         root_logger.setLevel(args.verbose_level)
-        logger_handler = logging.StreamHandler(
-            stream=sys.stdout)
+        logger_handler = logging.StreamHandler(stream=sys.stdout)
         if root_logger.level >= logging.INFO:
-            formatter = logging.Formatter(
-                "%(levelname)s:\t%(message)s")
+            formatter = logging.Formatter("%(levelname)s:\t%(message)s")
         else:
             formatter = logging.Formatter(
                 "%(asctime)s:%(levelname)s:%(name)s:%(message)s",
...
@@ -43,8 +43,6 @@ __all__ = [
 ]

-# pylint: disable=unused-argument
-
 if trial_env_vars.NNI_PLATFORM is None:
     def choice(*options, name=None):
         return param_exp.choice(options, np.random.RandomState())
@@ -150,42 +148,16 @@ else:
         optional_input_size: number of candidate inputs to be chosen
         tf: tensorflow module
         '''
+        args = (mutable_id, mutable_layer_id, funcs, funcs_args, fixed_inputs, optional_inputs, optional_input_size)
         if mode == 'classic_mode':
-            return classic_mode(mutable_id,
-                                mutable_layer_id,
-                                funcs,
-                                funcs_args,
-                                fixed_inputs,
-                                optional_inputs,
-                                optional_input_size)
+            return classic_mode(*args)
         assert tf is not None, 'Internal Error: Tensorflow should not be None in modes other than classic_mode'
         if mode == 'enas_mode':
-            return enas_mode(mutable_id,
-                             mutable_layer_id,
-                             funcs,
-                             funcs_args,
-                             fixed_inputs,
-                             optional_inputs,
-                             optional_input_size,
-                             tf)
+            return enas_mode(*args, tf)
         if mode == 'oneshot_mode':
-            return oneshot_mode(mutable_id,
-                                mutable_layer_id,
-                                funcs,
-                                funcs_args,
-                                fixed_inputs,
-                                optional_inputs,
-                                optional_input_size,
-                                tf)
+            return oneshot_mode(*args, tf)
         if mode == 'darts_mode':
-            return darts_mode(mutable_id,
-                              mutable_layer_id,
-                              funcs,
-                              funcs_args,
-                              fixed_inputs,
-                              optional_inputs,
-                              optional_input_size,
-                              tf)
+            return darts_mode(*args, tf)
         raise RuntimeError('Unrecognized mode: %s' % mode)

 def _get_param(key):
...
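The refactor above packs the seven shared positional arguments into one tuple and forwards it with unpacking; trailing positionals such as `tf` may still follow the unpacked tuple. A standalone sketch of the pattern:

```python
def classic(a, b, c):
    return ('classic', a, b, c)

def enas(a, b, c, tf):
    return ('enas', a, b, c, tf)

args = (1, 2, 3)                      # pack the shared arguments once
assert classic(*args) == ('classic', 1, 2, 3)
assert enas(*args, 'tf') == ('enas', 1, 2, 3, 'tf')   # extra positional after *args
```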
@@ -96,7 +96,7 @@ def report_final_result(metric):
         'parameter_id': _params['parameter_id'],
         'trial_job_id': trial_env_vars.NNI_TRIAL_JOB_ID,
         'type': 'FINAL',
-        'sequence': 0, # TODO: may be unnecessary
+        'sequence': 0,
         'value': metric
     })
     platform.send_metric(metric)
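For context, the dict above is the FINAL metric payload assembled when trial code reports its result; from the trial's side the call is simply:

```python
import nni

accuracy = 0.93                    # hypothetical evaluation result
nni.report_final_result(accuracy)  # builds the FINAL metric dict shown above
```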
@@ -27,7 +27,6 @@ _logger = logging.getLogger(__name__)

 class Tuner(Recoverable):
-    # pylint: disable=no-self-use,unused-argument
     def generate_parameters(self, parameter_id, **kwargs):
         """Returns a set of trial (hyper-)parameters, as a serializable object.
@@ -47,7 +46,7 @@ class Tuner(Recoverable):
         result = []
         for parameter_id in parameter_id_list:
             try:
-                _logger.debug("generating param for {}".format(parameter_id))
+                _logger.debug("generating param for %s", parameter_id)
                 res = self.generate_parameters(parameter_id, **kwargs)
             except nni.NoMoreTrialError:
                 return result
@@ -67,10 +66,12 @@ class Tuner(Recoverable):
         raise NotImplementedError('Tuner: receive_trial_result not implemented')

     def accept_customized_trials(self, accept=True):
         """Enable or disable receiving results of user-added hyper-parameters.
         By default `receive_trial_result()` will only receive results of algorithm-generated hyper-parameters.
         If tuners want to receive those of customized parameters as well, they can call this function in `__init__()`.
         """
+        # pylint: disable=attribute-defined-outside-init
+        # FIXME: because tuner is designed as interface, this API should not be here
         self._accept_customized = accept

     def trial_end(self, parameter_id, success, **kwargs):
@@ -78,7 +79,6 @@ class Tuner(Recoverable):
         parameter_id: int
         success: True if the trial successfully completed; False if failed or terminated
         """
-        pass

     def update_search_space(self, search_space):
         """Update the search space of tuner. Must override.
@@ -91,20 +91,19 @@ class Tuner(Recoverable):
         path: checkpoint directory for tuner
         """
         checkpoin_path = self.get_checkpoint_path()
-        _logger.info('Load checkpoint ignored by tuner, checkpoint path: %s' % checkpoin_path)
+        _logger.info('Load checkpoint ignored by tuner, checkpoint path: %s', checkpoin_path)

     def save_checkpoint(self):
         """Save the checkpoint of tuner.
         path: checkpoint directory for tuner
         """
         checkpoin_path = self.get_checkpoint_path()
-        _logger.info('Save checkpoint ignored by tuner, checkpoint path: %s' % checkpoin_path)
+        _logger.info('Save checkpoint ignored by tuner, checkpoint path: %s', checkpoin_path)

     def import_data(self, data):
         """Import additional data for tuning
         data: a list of dictionarys, each of which has at least two keys, 'parameter' and 'value'
         """
-        pass

     def _on_exit(self):
         pass
...
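The methods touched above make up the `Tuner` interface that concrete tuners override. A hedged sketch of a trivial subclass (the search-space format follows NNI's `_type`/`_value` convention; the exact `receive_trial_result` signature is assumed, not shown in this diff):

```python
import random

class RandomTuner(Tuner):                  # Tuner as defined above
    def update_search_space(self, search_space):
        # e.g. {'lr': {'_type': 'uniform', '_value': [0.001, 0.1]}}
        self.space = search_space

    def generate_parameters(self, parameter_id, **kwargs):
        low, high = self.space['lr']['_value']
        return {'lr': random.uniform(low, high)}

    def receive_trial_result(self, parameter_id, parameters, value, **kwargs):
        _logger.info('trial %s finished with %s', parameter_id, value)
```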
@@ -84,12 +84,13 @@ def extract_scalar_reward(value, scalar_key='default'):
         Incorrect final result: the final result should be float/int,
         or a dict which has a key named "default" whose value is float/int.
     """
-    if isinstance(value, float) or isinstance(value, int):
+    if isinstance(value, (float, int)):
         reward = value
     elif isinstance(value, dict) and scalar_key in value and isinstance(value[scalar_key], (float, int)):
         reward = value[scalar_key]
     else:
-        raise RuntimeError('Incorrect final result: the final result should be float/int, or a dict which has a key named "default" whose value is float/int.')
+        raise RuntimeError('Incorrect final result: the final result should be float/int, ' \
+                           'or a dict which has a key named "default" whose value is float/int.')
     return reward
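The rewritten branches behave exactly as before; for reference:

```python
assert extract_scalar_reward(0.95) == 0.95
assert extract_scalar_reward({'default': 0.95, 'loss': 0.3}) == 0.95
# Anything else (e.g. a string, or a dict without a 'default' key) raises RuntimeError.
```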
@@ -101,8 +102,7 @@ def convert_dict2tuple(value):
         for _keys in value:
             value[_keys] = convert_dict2tuple(value[_keys])
         return tuple(sorted(value.items()))
-    else:
-        return value
+    return value

 def init_dispatcher_logger():
...
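For reference, `convert_dict2tuple` recursively turns nested dicts into sorted tuples of items, yielding a hashable value:

```python
assert convert_dict2tuple({'b': 2, 'a': {'y': 1}}) == (('a', (('y', 1),)), ('b', 2))
```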
# This pylintrc file is a little stricter than the one in the root of the code directory.
# SDK source MUST pass the lint rules in the top-level directory, and SHOULD pass the rules here.

[SETTINGS]

max-line-length=140

disable =
    missing-docstring,
    invalid-name,                    # C0103
    no-member,                       # E1101: sometimes pylint cannot detect members correctly due to a bug
    c-extension-no-member,           # I1101
    no-self-use,                     # R0201: many functions in this SDK are designed for override
    duplicate-code,                  # R0801
    too-many-instance-attributes,    # R0902
    too-few-public-methods,          # R0903
    too-many-public-methods,         # R0904
    too-many-return-statements,      # R0911
    too-many-branches,               # R0912
    too-many-arguments,              # R0913
    too-many-locals,                 # R0914
    too-many-statements,             # R0915
    too-many-nested-blocks,          # R1702
    no-else-return,                  # R1705
    chained-comparison,              # R1716
    no-else-raise,                   # R1720
    protected-access,                # W0212: underscore variables may be protected by the whole SDK instead of a single module
    arguments-differ,                # W0221: pylint cannot handle *args and **kwargs
    super-init-not-called,           # W0231: some interface classes do not expect users to call init
    useless-super-delegation,        # W0235: derived init may have a different docstring
    global-statement,                # W0603: globals are useful to hide SDK internal states from the user
    unused-argument,                 # W0613: many functions in this SDK are designed for override
    broad-except,                    # W0703: the SDK commonly catches exceptions to report errors
    fixme,                           # W0511

ignore-patterns=test.*.py

# List of members which are set dynamically and missed by pylint inference
generated-members=numpy.*,torch.*
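To lint the SDK against this stricter file, pylint is invoked with `--rcfile` pointing at it; the path below is illustrative, not taken from the commit:

    pylint --rcfile=src/sdk/pynni/nni/pylintrc nni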
@@ -33,7 +33,7 @@ class App extends React.Component<{}, AppState> {
     }

     changeInterval = (interval: number) => {
-        this.setState({ interval: interval });
+        this.setState({ interval });
         if (this.timerId === null && interval !== 0) {
             window.setTimeout(this.refresh);
         } else if (this.timerId !== null && interval === 0) {
...
@@ -154,4 +154,3 @@ def _generate_specific_file(src_path, dst_path, exp_id, trial_id, module):
             raise RuntimeError(src_path + ' ' + '\n'.join(str(arg) for arg in exc.args))
         else:
             raise RuntimeError('Failed to expand annotations for %s: %r' % (src_path, exc))
-
@@ -20,9 +20,10 @@
 import ast
-import astor
 import numbers
+
+import astor

 # pylint: disable=unidiomatic-typecheck
...
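This module builds on the `ast`/`astor` pair reordered above: parse source into an AST, rewrite it, then regenerate source text. A minimal round-trip sketch:

```python
import ast
import astor

tree = ast.parse('x = 1 + 2')    # source -> AST
print(astor.to_source(tree))     # AST -> source: "x = 1 + 2\n"
```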