OpenDAS / nni · Commits

Commit ae7a72bc, authored Jun 19, 2019 by Hongarc, committed by Chi Song on Jun 19, 2019.
Remove all whitespace at end of line (#1162)
Parent: 14c1b31c
Changes: 176 files in the full commit; this page shows 16 changed files with 66 additions and 66 deletions (+66 −66).

Since every change strips trailing whitespace only, each removed line and its replacement render identically; the hunks below are therefore shown once, as the code reads after the cleanup.
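A change like this is mechanical enough to script. The following is a minimal sketch of how such a cleanup could be reproduced, not the tool actually used for this commit; the root directory and extension filter are illustrative:

import os

def strip_trailing_whitespace(root, extensions=('.py', '.ps1')):
    '''Rewrite files under root, removing spaces and tabs at end of line.'''
    for dirpath, _, filenames in os.walk(root):
        for name in filenames:
            if not name.endswith(extensions):
                continue
            path = os.path.join(dirpath, name)
            with open(path, encoding='utf-8') as source:
                lines = source.readlines()
            # Preserve a missing final newline (cf. the marker in
            # log_utils.py below): terminated lines are rstripped and
            # re-terminated; an unterminated last line only loses
            # trailing spaces and tabs.
            cleaned = [line.rstrip() + '\n' if line.endswith('\n')
                       else line.rstrip(' \t') for line in lines]
            if cleaned != lines:
                with open(path, 'w', encoding='utf-8') as target:
                    target.writelines(cleaned)

strip_trailing_whitespace('.')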
Files changed on this page:

    tools/nni_annotation/code_generator.py                +1 −1
    tools/nni_annotation/testcase/annotated/mnist.py      +1 −1
    tools/nni_annotation/testcase/usercode/mnist.py       +14 −14
    tools/nni_cmd/config_schema.py                        +2 −2
    tools/nni_cmd/config_utils.py                         +4 −4
    tools/nni_cmd/launcher.py                             +10 −10
    tools/nni_cmd/launcher_utils.py                       +6 −6
    tools/nni_cmd/nnictl.py                               +1 −1
    tools/nni_cmd/nnictl_utils.py                         +2 −2
    tools/nni_cmd/package_management.py                   +2 −2
    tools/nni_cmd/updater.py                              +1 −1
    tools/nni_trial_tool/hdfsClientUtility.py             +3 −3
    tools/nni_trial_tool/log_utils.py                     +4 −4
    tools/nni_trial_tool/test/test_hdfsClientUtility.py   +12 −12
    tools/nni_trial_tool/trial_keeper.py                  +2 −2
    uninstall.ps1                                         +1 −1
tools/nni_annotation/code_generator.py
@@ -230,7 +230,7 @@ def test_variable_equal(node1, node2):
        if len(node1) != len(node2):
            return False
        return all(test_variable_equal(n1, n2) for n1, n2 in zip(node1, node2))

    return node1 == node2
tools/nni_annotation/testcase/annotated/mnist.py
@@ -161,7 +161,7 @@ def main():
def generate_default_params():
    params = {'data_dir': '/tmp/tensorflow/mnist/input_data',
        'dropout_rate': 0.5, 'channel_1_num': 32, 'channel_2_num': 64,
        'conv_size': 5, 'pool_size': 2, 'hidden_size': 1024, 'batch_size':
        50, 'batch_num': 200, 'learning_rate': 0.0001}
    return params
tools/nni_annotation/testcase/usercode/mnist.py
@@ -44,7 +44,7 @@ class MnistNetwork(object):
        self.x = tf.placeholder(tf.float32, [None, self.x_dim], name='input_x')
        self.y = tf.placeholder(tf.float32, [None, self.y_dim], name='input_y')
        self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')

        # Reshape to use within a convolutional neural net.
        # Last dimension is for "features" - there is only one here, since images are
        # grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
@@ -55,8 +55,8 @@ class MnistNetwork(object):
            #print('input dim cannot be sqrt and reshape. input dim: ' + str(self.x_dim))
            logger.debug('input dim cannot be sqrt and reshape. input dim: ' + str(self.x_dim))
            raise
        x_image = tf.reshape(self.x, [-1, input_dim, input_dim, 1])

        # First convolutional layer - maps one grayscale image to 32 feature maps.
        with tf.name_scope('conv1'):
            W_conv1 = weight_variable([self.conv_size, self.conv_size, 1, self.channel_1_num])
@@ -68,38 +68,38 @@ class MnistNetwork(object):
        with tf.name_scope('pool1'):
            """@nni.function_choice(max_pool(h_conv1, self.pool_size),avg_pool(h_conv1, self.pool_size),name=max_pool)"""
            h_pool1 = max_pool(h_conv1, self.pool_size)

        # Second convolutional layer -- maps 32 feature maps to 64.
        with tf.name_scope('conv2'):
            W_conv2 = weight_variable([self.conv_size, self.conv_size, self.channel_1_num, self.channel_2_num])
            b_conv2 = bias_variable([self.channel_2_num])
            h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)

        # Second pooling layer.
        with tf.name_scope('pool2'):
            #"""@nni.dynamic(input={cnn_block:1, concat:2},function_choice={"cnn_block":(x,nni.choice([3,4])),"cnn_block":(x),"concat":(x,y)},limit={"cnn_block.input":[concat,input],"concat.input":[this.depth-1,this.depth-3,this.depth-5],"graph.width":[1]})"""
            h_pool2 = max_pool(h_conv2, self.pool_size)

        # Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image
        # is down to 7x7x64 feature maps -- maps this to 1024 features.
        last_dim = int(input_dim / (self.pool_size * self.pool_size))
        with tf.name_scope('fc1'):
            W_fc1 = weight_variable([last_dim * last_dim * self.channel_2_num, self.hidden_size])
            b_fc1 = bias_variable([self.hidden_size])
            h_pool2_flat = tf.reshape(h_pool2, [-1, last_dim * last_dim * self.channel_2_num])
            h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

        # Dropout - controls the complexity of the model, prevents co-adaptation of features.
        with tf.name_scope('dropout'):
            h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob)

        # Map the 1024 features to 10 classes, one for each digit
        with tf.name_scope('fc2'):
            W_fc2 = weight_variable([self.hidden_size, self.y_dim])
            b_fc2 = bias_variable([self.y_dim])
            y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2

        with tf.name_scope('loss'):
            cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv))
        with tf.name_scope('adam_optimizer'):
@@ -121,7 +121,7 @@ def max_pool(x, pool_size):
                          strides=[1, pool_size, pool_size, 1],
                          padding='SAME')

def avg_pool(x, pool_size):
    return tf.nn.avg_pool(x, ksize=[1, pool_size, pool_size, 1],
                          strides=[1, pool_size, pool_size, 1],
                          padding='SAME')

def weight_variable(shape):
    """weight_variable generates a weight variable of a given shape."""
@@ -163,12 +163,12 @@ def main():
            '''@nni.variable(nni.choice(1,5),name=dropout_rate)'''
            dropout_rate = 0.5
            mnist_network.train_step.run(feed_dict={mnist_network.x: batch[0],
                mnist_network.y: batch[1], mnist_network.keep_prob: dropout_rate})

            if i % 100 == 0:
                #train_accuracy = mnist_network.accuracy.eval(feed_dict={
                #    mnist_network.x: batch[0], mnist_network.y: batch[1], mnist_network.keep_prob: params['dropout_rate']})
                #print('step %d, training accuracy %g' % (i, train_accuracy))
                test_acc = mnist_network.accuracy.eval(feed_dict={
                    mnist_network.x: mnist.test.images, mnist_network.y: mnist.test.labels,
                    mnist_network.keep_prob: 1.0})

                '''@nni.report_intermediate_result(test_acc)'''
@@ -196,7 +196,7 @@ if __name__ == '__main__':
    #FLAGS, unparsed = parse_command()
    #original_params = parse_init_json(FLAGS.init_file_path, {})
    #pipe_interface.set_params_to_env()

    '''@nni.get_next_parameter()'''
    try:
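The triple-quoted '''@nni.*''' strings in this test case are NNI annotations: nni_annotation rewrites them into calls on the nni trial API when an experiment runs with annotation enabled. As a rough sketch of what the annotated script above reduces to in plain-API form (the training step here is a stand-in, not code from this repository):

import random
import nni

def train_one_step(dropout_rate):
    '''Stand-in for the real TensorFlow training step.'''
    return random.random()

if __name__ == '__main__':
    # '''@nni.get_next_parameter()''' -- fetch one set of hyperparameters
    params = nni.get_next_parameter() or {}
    # '''@nni.variable(nni.choice(1,5),name=dropout_rate)''' -- tuner overrides the default
    dropout_rate = params.get('dropout_rate', 0.5)
    for i in range(200):
        test_acc = train_one_step(dropout_rate)
        if i % 100 == 0:
            # '''@nni.report_intermediate_result(test_acc)''' -- stream progress to the assessor
            nni.report_intermediate_result(test_acc)
    nni.report_final_result(test_acc)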
tools/nni_cmd/config_schema.py
@@ -128,7 +128,7 @@ advisor_schema_dict = {
    'optimize_mode': setChoice('optimize_mode', 'maximize', 'minimize'),
    Optional('min_budget'): setNumberRange('min_budget', int, 0, 9999),
    Optional('max_budget'): setNumberRange('max_budget', int, 0, 9999),
    Optional('eta'): setNumberRange('eta', int, 0, 9999),
    Optional('min_points_in_model'): setNumberRange('min_points_in_model', int, 0, 9999),
    Optional('top_n_percent'): setNumberRange('top_n_percent', int, 1, 99),
    Optional('num_samples'): setNumberRange('num_samples', int, 1, 9999),
@@ -235,7 +235,7 @@ kubeflow_trial_schema = {
            'cpuNum': setNumberRange('cpuNum', int, 0, 99999),
            'memoryMB': setType('memoryMB', int),
            'image': setType('image', str)
        }
    }
}
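For context, entries like Optional('eta'): setNumberRange('eta', int, 0, 9999) feed the schema package, which validates the parsed experiment config before launch. A minimal sketch of that pattern, with a simplified stand-in for nni's setNumberRange helper:

from schema import And, Optional, Schema, SchemaError

def number_range(key, key_type, start, end):
    '''Simplified stand-in for config_schema.py's setNumberRange.'''
    return And(key_type, lambda n: start <= n <= end,
               error='%s should be a %s in range [%s, %s]' % (key, key_type.__name__, start, end))

advisor_schema = Schema({
    'optimize_mode': And(str, lambda s: s in ('maximize', 'minimize')),
    Optional('min_budget'): number_range('min_budget', int, 0, 9999),
    Optional('eta'): number_range('eta', int, 0, 9999),
})

advisor_schema.validate({'optimize_mode': 'maximize', 'eta': 3})      # passes
try:
    advisor_schema.validate({'optimize_mode': 'maximize', 'eta': -1}) # rejected
except SchemaError as error:
    print(error)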
tools/nni_cmd/config_utils.py
@@ -83,7 +83,7 @@ class Experiments:
        self.experiments[id]['fileName'] = file_name
        self.experiments[id]['platform'] = platform
        self.write_file()

    def update_experiment(self, id, key, value):
        '''Update experiment'''
        if id not in self.experiments:
@@ -91,17 +91,17 @@ class Experiments:
        self.experiments[id][key] = value
        self.write_file()
        return True

    def remove_experiment(self, id):
        '''remove an experiment by id'''
        if id in self.experiments:
            self.experiments.pop(id)
        self.write_file()

    def get_all_experiments(self):
        '''return all of experiments'''
        return self.experiments

    def write_file(self):
        '''save config to local file'''
        try:
tools/nni_cmd/launcher.py
@@ -103,7 +103,7 @@ def start_rest_server(port, platform, mode, config_file_name, experiment_id=None
        print_error('Port %s is used by another process, please reset the port!\n' \
        'You could use \'nnictl create --help\' to get help information' % port)
        exit(1)

    if (platform != 'local') and detect_port(int(port) + 1):
        print_error('PAI mode need an additional adjacent port %d, and the port %d is used by another process!\n' \
        'You could set another port to start experiment!\n' \
@@ -111,7 +111,7 @@ def start_rest_server(port, platform, mode, config_file_name, experiment_id=None
        exit(1)

    print_normal('Starting restful server...')
    entry_dir = get_nni_installation_path()
    entry_file = os.path.join(entry_dir, 'main.js')
@@ -221,7 +221,7 @@ def setNNIManagerIp(experiment_config, port, config_file_name):
    return True, None

def set_pai_config(experiment_config, port, config_file_name):
    '''set pai configuration'''
    pai_config_data = dict()
    pai_config_data['pai_config'] = experiment_config['paiConfig']
    response = rest_put(cluster_metadata_url(port), json.dumps(pai_config_data), REST_TIME_OUT)
@@ -240,7 +240,7 @@ def set_pai_config(experiment_config, port, config_file_name):
    return set_trial_config(experiment_config, port, config_file_name), err_message

def set_kubeflow_config(experiment_config, port, config_file_name):
    '''set kubeflow configuration'''
    kubeflow_config_data = dict()
    kubeflow_config_data['kubeflow_config'] = experiment_config['kubeflowConfig']
    response = rest_put(cluster_metadata_url(port), json.dumps(kubeflow_config_data), REST_TIME_OUT)
@@ -259,7 +259,7 @@ def set_kubeflow_config(experiment_config, port, config_file_name):
    return set_trial_config(experiment_config, port, config_file_name), err_message

def set_frameworkcontroller_config(experiment_config, port, config_file_name):
    '''set frameworkcontroller configuration'''
    frameworkcontroller_config_data = dict()
    frameworkcontroller_config_data['frameworkcontroller_config'] = experiment_config['frameworkcontrollerConfig']
    response = rest_put(cluster_metadata_url(port), json.dumps(frameworkcontroller_config_data), REST_TIME_OUT)
@@ -319,7 +319,7 @@ def set_experiment(experiment_config, mode, port, config_file_name):
            {'key': 'trial_config', 'value': experiment_config['trial']})
    elif experiment_config['trainingServicePlatform'] == 'pai':
        request_data['clusterMetaData'].append(
            {'key': 'pai_config', 'value': experiment_config['paiConfig']})
        request_data['clusterMetaData'].append(
            {'key': 'trial_config', 'value': experiment_config['trial']})
    elif experiment_config['trainingServicePlatform'] == 'kubeflow':
@@ -424,7 +424,7 @@ def launch_experiment(args, experiment_config, mode, config_file_name, experimen
        except Exception:
            raise Exception(ERROR_INFO % 'Rest server stopped!')
        exit(1)

    #set pai config
    if experiment_config['trainingServicePlatform'] == 'pai':
        print_normal('Setting pai config...')
@@ -439,7 +439,7 @@ def launch_experiment(args, experiment_config, mode, config_file_name, experimen
        except Exception:
            raise Exception(ERROR_INFO % 'Restful server stopped!')
        exit(1)

    #set kubeflow config
    if experiment_config['trainingServicePlatform'] == 'kubeflow':
        print_normal('Setting kubeflow config...')
@@ -454,7 +454,7 @@ def launch_experiment(args, experiment_config, mode, config_file_name, experimen
        except Exception:
            raise Exception(ERROR_INFO % 'Restful server stopped!')
        exit(1)

    #set frameworkcontroller config
    if experiment_config['trainingServicePlatform'] == 'frameworkcontroller':
        print_normal('Setting frameworkcontroller config...')
@@ -493,7 +493,7 @@ def launch_experiment(args, experiment_config, mode, config_file_name, experimen
    else:
        web_ui_url_list = get_local_urls(args.port)
    nni_config.set_config('webuiUrl', web_ui_url_list)

    #save experiment information
    nnictl_experiment_config = Experiments()
    nnictl_experiment_config.add_experiment(experiment_id, args.port, start_time, config_file_name, experiment_config['trainingServicePlatform'])
tools/nni_cmd/launcher_utils.py
@@ -21,7 +21,7 @@
import os
import json
from .config_schema import LOCAL_CONFIG_SCHEMA, REMOTE_CONFIG_SCHEMA, PAI_CONFIG_SCHEMA, KUBEFLOW_CONFIG_SCHEMA, FRAMEWORKCONTROLLER_CONFIG_SCHEMA, \
    tuner_schema_dict, advisor_schema_dict, assessor_schema_dict
from schema import SchemaMissingKeyError, SchemaForbiddenKeyError, SchemaUnexpectedTypeError, SchemaWrongKeyError, SchemaError
from .common_utils import get_json_content, print_error, print_warning, print_normal
from schema import Schema, And, Use, Optional, Regex, Or
@@ -62,7 +62,7 @@ def parse_path(experiment_config, config_path):
        expand_path(experiment_config['assessor'], 'codeDir')
    if experiment_config.get('advisor'):
        expand_path(experiment_config['advisor'], 'codeDir')

    #if users use relative path, convert it to absolute path
    root_path = os.path.dirname(config_path)
    if experiment_config.get('searchSpacePath'):
@@ -80,8 +80,8 @@ def parse_path(experiment_config, config_path):
            parse_relative_path(root_path, experiment_config['machineList'][index], 'sshKeyPath')

def validate_search_space_content(experiment_config):
    '''Validate searchspace content,
    if the searchspace file is not json format or its values does not contain _type and _value which must be specified,
    it will not be a valid searchspace file'''
    try:
        search_space_content = json.load(open(experiment_config.get('searchSpacePath'), 'r'))
@@ -110,7 +110,7 @@ def validate_kubeflow_operators(experiment_config):
            if experiment_config.get('trial').get('master') is None:
                print_error('kubeflow with pytorch-operator must set master')
                exit(1)

    if experiment_config.get('kubeflowConfig').get('storage') == 'nfs':
        if experiment_config.get('kubeflowConfig').get('nfs') is None:
            print_error('please set nfs configuration!')
@@ -170,7 +170,7 @@ def validate_common_content(experiment_config):
        else:
            print_error(error)
        exit(1)

    #set default value
    if experiment_config.get('maxExecDuration') is None:
        experiment_config['maxExecDuration'] = '999d'
tools/nni_cmd/nnictl.py
@@ -176,7 +176,7 @@ def parse_args():
    parser_package_subparsers = parser_package.add_subparsers()
    parser_package_install = parser_package_subparsers.add_parser('install', help='install packages')
    parser_package_install.add_argument('--name', '-n', dest='name', help='package name to be installed')
    parser_package_install.set_defaults(func=package_install)
    parser_package_show = parser_package_subparsers.add_parser('show', help='show the information of packages')
    parser_package_show.set_defaults(func=package_show)
tools/nni_cmd/nnictl_utils.py
@@ -321,7 +321,7 @@ def log_internal(args, filetype):
    else:
        file_full_path = os.path.join(NNICTL_HOME_DIR, file_name, 'stderr')
    print(check_output_command(file_full_path, head=args.head, tail=args.tail))

def log_stdout(args):
    '''get stdout log'''
    log_internal(args, 'stdout')
@@ -393,7 +393,7 @@ def experiment_list(args):
        print_warning('There is no experiment running...\nYou can use \'nnictl experiment list all\' to list all stopped experiments!')
    experiment_information = ""
    for key in experiment_id_list:
        experiment_information += (EXPERIMENT_DETAIL_FORMAT % (key, experiment_dict[key]['status'], experiment_dict[key]['port'], \
            experiment_dict[key].get('platform'), experiment_dict[key]['startTime'], experiment_dict[key]['endTime']))
    print(EXPERIMENT_INFORMATION_FORMAT % experiment_information)
tools/nni_cmd/package_management.py
@@ -36,8 +36,8 @@ def process_install(package_name):
def package_install(args):
    '''install packages'''
    process_install(args.name)

def package_show(args):
    '''show all packages'''
    print(' '.join(PACKAGE_REQUIREMENTS.keys()))
tools/nni_cmd/updater.py
@@ -112,7 +112,7 @@ def update_concurrency(args):
        print_error('Update %s failed!' % 'concurrency')

def update_duration(args):
    #parse time, change time unit to seconds
    args.value = parse_time(args.value)
    args.port = get_experiment_port(args)
    if args.port is not None:
tools/nni_trial_tool/hdfsClientUtility.py
@@ -40,16 +40,16 @@ def copyHdfsDirectoryToLocal(hdfsDirectory, localDirectory, hdfsClient):
            copyHdfsDirectoryToLocal(subHdfsDirectory, subLocalDirectory, hdfsClient)
        elif f.type == 'FILE':
            hdfsFilePath = posixpath.join(hdfsDirectory, f.pathSuffix)
            localFilePath = os.path.join(localDirectory, f.pathSuffix)
            copyHdfsFileToLocal(hdfsFilePath, localFilePath, hdfsClient)
        else:
            raise AssertionError('unexpected type {}'.format(f.type))

def copyHdfsFileToLocal(hdfsFilePath, localFilePath, hdfsClient, override=True):
    '''Copy file from HDFS to local'''
    if not hdfsClient.exists(hdfsFilePath):
        raise Exception('HDFS file {} does not exist!'.format(hdfsFilePath))
    try:
        file_status = hdfsClient.get_file_status(hdfsFilePath)
        if file_status.type != 'FILE':
            raise Exception('HDFS file path {} is not a file'.format(hdfsFilePath))
tools/nni_trial_tool/log_utils.py
@@ -142,7 +142,7 @@ class PipeLogReader(threading.Thread):
        '''
        time.sleep(5)
        while True:
            cur_process_exit = self.process_exit
            try:
                line = self.queue.get(True, 5)
                try:
@@ -150,7 +150,7 @@ class PipeLogReader(threading.Thread):
                except Exception as e:
                    pass
            except Exception as e:
                if cur_process_exit == True:
                    self._is_read_completed = True
                    break
@@ -177,7 +177,7 @@ class PipeLogReader(threading.Thread):
                if not self.log_pattern.match(line):
                    continue
                self.queue.put(line)

        self.pipeReader.close()

    def close(self):
@@ -190,7 +190,7 @@ class PipeLogReader(threading.Thread):
"""Return if read is completed
"""
return
self
.
_is_read_completed
def
set_process_exit
(
self
):
self
.
process_exit
=
True
return
self
.
process_exit
\ No newline at end of file
tools/nni_trial_tool/test/test_hdfsClientUtility.py
@@ -39,9 +39,9 @@ class HDFSClientUtilityTest(unittest.TestCase):
                self.hdfs_config = json.load(file)
        except Exception as exception:
            print(exception)

        self.hdfs_client = HdfsClient(hosts='{0}:{1}'.format(self.hdfs_config['host'], '50070'), user_name=self.hdfs_config['userName'])

    def get_random_name(self, length):
        return ''.join(random.sample(string.ascii_letters + string.digits, length))
@@ -49,20 +49,20 @@ class HDFSClientUtilityTest(unittest.TestCase):
        '''test copyFileToHdfs'''
        file_name = self.get_random_name(8)
        file_content = 'hello world!'

        with open('./{}'.format(file_name), 'w') as file:
            file.write(file_content)

        result = copyFileToHdfs('./{}'.format(file_name), '/{0}/{1}'.format(self.hdfs_config['userName'], file_name), self.hdfs_client)
        self.assertTrue(result)

        file_list = self.hdfs_client.listdir('/{0}'.format(self.hdfs_config['userName']))
        self.assertIn(file_name, file_list)

        hdfs_file_name = self.get_random_name(8)
        self.hdfs_client.copy_to_local('/{0}/{1}'.format(self.hdfs_config['userName'], file_name), './{}'.format(hdfs_file_name))
        self.assertTrue(os.path.exists('./{}'.format(hdfs_file_name)))

        with open('./{}'.format(hdfs_file_name), 'r') as file:
            content = file.readline()
            self.assertEqual(file_content, content)
@@ -70,21 +70,21 @@ class HDFSClientUtilityTest(unittest.TestCase):
        os.remove('./{}'.format(file_name))
        os.remove('./{}'.format(hdfs_file_name))
        self.hdfs_client.delete('/{0}/{1}'.format(self.hdfs_config['userName'], file_name))

    def test_copy_directory_run(self):
        '''test copyDirectoryToHdfs'''
        directory_name = self.get_random_name(8)
        file_name_list = [self.get_random_name(8), self.get_random_name(8)]
        file_content = 'hello world!'

        os.makedirs('./{}'.format(directory_name))
        for file_name in file_name_list:
            with open('./{0}/{1}'.format(directory_name, file_name), 'w') as file:
                file.write(file_content)

        result = copyDirectoryToHdfs('./{}'.format(directory_name), '/{0}/{1}'.format(self.hdfs_config['userName'], directory_name), self.hdfs_client)
        self.assertTrue(result)

        directory_list = self.hdfs_client.listdir('/{0}'.format(self.hdfs_config['userName']))
        self.assertIn(directory_name, directory_list)
@@ -94,7 +94,7 @@ class HDFSClientUtilityTest(unittest.TestCase):
        #clean up
        self.hdfs_client.delete('/{0}/{1}/{2}'.format(self.hdfs_config['userName'], directory_name, file_name))
        self.hdfs_client.delete('/{0}/{1}'.format(self.hdfs_config['userName'], directory_name))
        shutil.rmtree('./{}'.format(directory_name))

if __name__ == '__main__':
tools/nni_trial_tool/trial_keeper.py
@@ -42,10 +42,10 @@ regular = re.compile('v?(?P<version>[0-9](\.[0-9]){0,1}).*')
def main_loop(args):
    '''main loop logic for trial keeper'''

    if not os.path.exists(LOG_DIR):
        os.makedirs(LOG_DIR)

    stdout_file = open(STDOUT_FULL_PATH, 'a+')
    stderr_file = open(STDERR_FULL_PATH, 'a+')
    trial_keeper_syslogger = RemoteLogger(args.nnimanager_ip, args.nnimanager_port, 'trial_keeper', StdOutputType.Stdout, args.log_collection)
uninstall.ps1
@@ -15,7 +15,7 @@ else{
$PIP_UNINSTALL = """$NNI_PYTHON3\python"" -m pip uninstall -y "
$NNI_NODE_FOLDER = $NNI_DEPENDENCY_FOLDER + "\nni-node"
$NNI_YARN_FOLDER = $NNI_DEPENDENCY_FOLDER + "\nni-yarn"

# uninstall
Remove-Item $NNI_PKG_FOLDER -Recurse -Force
cmd /C $PIP_UNINSTALL "nni"