OpenDAS / nni

Commit 025e0b46 (unverified)
Windows pr pipeline (#1682)
Authored Nov 02, 2019 by chicm-ms; committed by GitHub on Nov 02, 2019
Parent: 0521a0c2

Showing 9 changed files with 162 additions and 34 deletions (+162 −34):
azure-pipelines.yml                        +43  −3
test/metrics_test.py                        +9  −3
test/metrics_test/metrics_win32.test.yml   +20  −0
test/naive_test.py                         +23 −15
test/naive_test/local_win32.yml            +26  −0
test/tuner_test.py                         +11  −2
test/tuner_test/local_win32.yml            +20  −0
test/unittest.ps1                           +1  −1
test/utils.py                               +9 −10
azure-pipelines.yml  (+43 −3)

@@ -13,8 +13,8 @@ jobs:
   - script: |
       python3 -m pip install torch==0.4.1 --user
       python3 -m pip install torchvision==0.2.1 --user
-      python3 -m pip install tensorflow==1.12.0 --user
-    displayName: 'Install dependencies for integration'
+      python3 -m pip install tensorflow==1.13.1 --user
+    displayName: 'Install dependencies'
   - script: |
       source install.sh
     displayName: 'Install nni toolkit via source code'

@@ -59,7 +59,7 @@ jobs:
       python3 -m pip install torch==0.4.1 --user
       python3 -m pip install torchvision==0.2.1 --user
       python3 -m pip install tensorflow==1.13.1 --user
-    displayName: 'Install dependencies for integration'
+    displayName: 'Install dependencies'
   - script: |
       source install.sh
     displayName: 'Install nni toolkit via source code'

@@ -79,3 +79,43 @@ jobs:
       cd test
       PATH=$HOME/Library/Python/3.7/bin:$PATH python3 cli_test.py
     displayName: 'nnicli test'
+
+- job: 'basic_test_pr_Windows'
+  pool:
+    vmImage: 'vs2017-win2016'
+  strategy:
+    matrix:
+      Python36:
+        PYTHON_VERSION: '3.6'
+  steps:
+  - script: |
+      powershell.exe -file install.ps1
+    displayName: 'Install nni toolkit via source code'
+  - script: |
+      python -m pip install scikit-learn==0.20.0 --user
+      python -m pip install keras==2.1.6 --user
+      python -m pip install https://download.pytorch.org/whl/cu90/torch-0.4.1-cp36-cp36m-win_amd64.whl --user
+      python -m pip install torchvision --user
+      python -m pip install tensorflow==1.13.1 --user
+    displayName: 'Install dependencies'
+  - script: |
+      cd test
+      powershell.exe -file unittest.ps1
+    displayName: 'unit test'
+  - script: |
+      cd test
+      python naive_test.py
+    displayName: 'Naive test'
+  - script: |
+      cd test
+      python tuner_test.py
+    displayName: 'Built-in tuners / assessors tests'
+  - script: |
+      cd test
+      python metrics_test.py
+    displayName: 'Trial job metrics test'
+  - script: |
+      cd test
+      PATH=$HOME/.local/bin:$PATH python3 cli_test.py
+    displayName: 'nnicli test'
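For reference, the new Windows job runs the same sequence the Linux/macOS jobs do, just driven through PowerShell. A minimal sketch of reproducing that sequence locally from the repository root on a Windows checkout (the run_step helper is hypothetical, not part of the repo):

    import subprocess

    def run_step(display_name, *cmd):
        # Hypothetical helper mirroring the pipeline's displayName/script pairs.
        print('=== %s ===' % display_name)
        subprocess.run(list(cmd), cwd='test', check=True)

    run_step('unit test', 'powershell.exe', '-file', 'unittest.ps1')
    run_step('Naive test', 'python', 'naive_test.py')
    run_step('Built-in tuners / assessors tests', 'python', 'tuner_test.py')
    run_step('Trial job metrics test', 'python', 'metrics_test.py')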
test/metrics_test.py  (+9 −3)

@@ -18,18 +18,23 @@
 # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
+import sys
+import os.path as osp
 import subprocess
 import time
 import traceback
 import json
 import requests
 
-from utils import get_experiment_status, get_yml_content, parse_max_duration_time, get_succeeded_trial_num
+from utils import get_experiment_status, get_yml_content, parse_max_duration_time, get_succeeded_trial_num, print_stderr
 from utils import GREEN, RED, CLEAR, STATUS_URL, TRIAL_JOBS_URL, METRICS_URL
 
 def run_test():
     '''run metrics test'''
-    config_file = 'metrics_test/metrics.test.yml'
+    if sys.platform == 'win32':
+        config_file = osp.join('metrics_test', 'metrics_win32.test.yml')
+    else:
+        config_file = osp.join('metrics_test', 'metrics.test.yml')
 
     print('Testing %s...' % config_file)
     proc = subprocess.run(['nnictl', 'create', '--config', config_file])

@@ -44,6 +49,7 @@ def run_test():
         #print('experiment status:', status)
         if status == 'DONE':
             num_succeeded = get_succeeded_trial_num(TRIAL_JOBS_URL)
+            print_stderr(TRIAL_JOBS_URL)
             assert num_succeeded == max_trial_num, 'only %d succeeded trial jobs, there should be %d' % (num_succeeded, max_trial_num)
             check_metrics()
             break

@@ -51,7 +57,7 @@ def run_test():
     assert status == 'DONE', 'Failed to finish in maxExecDuration'
 
 def check_metrics():
-    with open('metrics_test/expected_metrics.json', 'r') as f:
+    with open(osp.join('metrics_test', 'expected_metrics.json'), 'r') as f:
         expected_metrics = json.load(f)
     print(expected_metrics)
     metrics = requests.get(METRICS_URL).json()
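The platform switch added here recurs across the test scripts. Extracted into a standalone helper it would look like this (a sketch; pick_config is not a function in the repo):

    import sys
    import os.path as osp

    def pick_config(test_dir, posix_name, win32_name):
        # Windows needs its own experiment config (e.g. `python` instead of
        # `python3` in the trial command), selected by sys.platform.
        name = win32_name if sys.platform == 'win32' else posix_name
        return osp.join(test_dir, name)

    print(pick_config('metrics_test', 'metrics.test.yml', 'metrics_win32.test.yml'))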
test/metrics_test/metrics_win32.test.yml  (new file, mode 100644, +20 −0)

authorName: nni
experimentName: default_test
maxExecDuration: 3m
maxTrialNum: 1
trialConcurrency: 1
searchSpacePath: ./search_space.json
tuner:
  builtinTunerName: Random
trial:
  codeDir: .
  command: python trial.py
  gpuNum: 0
useAnnotation: false
multiPhase: false
multiThread: false
trainingServicePlatform: local
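The test scripts read configs like this through utils.get_yml_content. A minimal standalone sketch of that load, run from the test directory and assuming ruamel.yaml's PyYAML-compatible interface:

    import ruamel.yaml as yaml

    def get_yml_content(file_path):
        # Same idea as the helper in test/utils.py: parse an experiment config.
        with open(file_path, 'r') as f:
            return yaml.load(f, Loader=yaml.Loader)

    config = get_yml_content('metrics_test/metrics_win32.test.yml')
    # Note the Windows-specific trial command: `python`, not `python3`.
    assert config['trial']['command'] == 'python trial.py'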
test/naive_test.py  (+23 −15)

@@ -18,6 +18,8 @@
 # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
+import sys
+import os.path as osp
 import json
 import subprocess
 import sys

@@ -30,10 +32,14 @@ from utils import GREEN, RED, CLEAR, EXPERIMENT_URL
 def naive_test():
     '''run naive integration test'''
     to_remove = ['tuner_search_space.json', 'tuner_result.txt', 'assessor_result.txt']
-    to_remove = list(map(lambda file: 'naive_test/' + file, to_remove))
+    to_remove = list(map(lambda file: osp.join('naive_test', file), to_remove))
     remove_files(to_remove)
-    proc = subprocess.run(['nnictl', 'create', '--config', 'naive_test/local.yml'])
+    if sys.platform == 'win32':
+        config_file = 'local_win32.yml'
+    else:
+        config_file = 'local.yml'
+    proc = subprocess.run(['nnictl', 'create', '--config', osp.join('naive_test', config_file)])
     assert proc.returncode == 0, '`nnictl create` failed with code %d' % proc.returncode
 
     print('Spawning trials...')

@@ -44,8 +50,8 @@ def naive_test():
     for _ in range(120):
         time.sleep(1)
-        tuner_status = read_last_line('naive_test/tuner_result.txt')
-        assessor_status = read_last_line('naive_test/assessor_result.txt')
+        tuner_status = read_last_line(osp.join('naive_test', 'tuner_result.txt'))
+        assessor_status = read_last_line(osp.join('naive_test', 'assessor_result.txt'))
         experiment_status = is_experiment_done(nnimanager_log_path)
 
         assert tuner_status != 'ERROR', 'Tuner exited with error'

@@ -55,7 +61,7 @@ def naive_test():
             break
 
     if tuner_status is not None:
-        for line in open('naive_test/tuner_result.txt'):
+        for line in open(osp.join('naive_test', 'tuner_result.txt')):
             if line.strip() == 'ERROR':
                 break
             trial = int(line.split(' ')[0])

@@ -65,18 +71,20 @@ def naive_test():
     assert experiment_status, 'Failed to finish in 2 min'
 
-    ss1 = json.load(open('naive_test/search_space.json'))
-    ss2 = json.load(open('naive_test/tuner_search_space.json'))
+    ss1 = json.load(open(osp.join('naive_test', 'search_space.json')))
+    ss2 = json.load(open(osp.join('naive_test', 'tuner_search_space.json')))
     assert ss1 == ss2, 'Tuner got wrong search space'
 
-    tuner_result = set(open('naive_test/tuner_result.txt'))
-    expected = set(open('naive_test/expected_tuner_result.txt'))
+    tuner_result = set(open(osp.join('naive_test', 'tuner_result.txt')))
+    expected = set(open(osp.join('naive_test', 'expected_tuner_result.txt')))
     # Trials may complete before NNI gets assessor's result,
     # so it is possible to have more final result than expected
+    print('Tuner result:', tuner_result)
+    print('Expected tuner result:', expected)
     assert tuner_result.issuperset(expected), 'Bad tuner result'
 
-    assessor_result = set(open('naive_test/assessor_result.txt'))
-    expected = set(open('naive_test/expected_assessor_result.txt'))
+    assessor_result = set(open(osp.join('naive_test', 'assessor_result.txt')))
+    expected = set(open(osp.join('naive_test', 'expected_assessor_result.txt')))
     assert assessor_result == expected, 'Bad assessor result'
 
     subprocess.run(['nnictl', 'stop'])

@@ -85,10 +93,10 @@ def naive_test():
 def stop_experiment_test():
     '''Test `nnictl stop` command, including `nnictl stop exp_id` and `nnictl stop all`.
     Simple `nnictl stop` is not tested here since it is used in all other test code'''
-    subprocess.run(['nnictl', 'create', '--config', 'tuner_test/local.yml', '--port', '8080'], check=True)
-    subprocess.run(['nnictl', 'create', '--config', 'tuner_test/local.yml', '--port', '8888'], check=True)
-    subprocess.run(['nnictl', 'create', '--config', 'tuner_test/local.yml', '--port', '8989'], check=True)
-    subprocess.run(['nnictl', 'create', '--config', 'tuner_test/local.yml', '--port', '8990'], check=True)
+    subprocess.run(['nnictl', 'create', '--config', osp.join('tuner_test', 'local.yml'), '--port', '8080'], check=True)
+    subprocess.run(['nnictl', 'create', '--config', osp.join('tuner_test', 'local.yml'), '--port', '8888'], check=True)
+    subprocess.run(['nnictl', 'create', '--config', osp.join('tuner_test', 'local.yml'), '--port', '8989'], check=True)
+    subprocess.run(['nnictl', 'create', '--config', osp.join('tuner_test', 'local.yml'), '--port', '8990'], check=True)
 
     # test cmd 'nnictl stop id`
     experiment_id = get_experiment_id(EXPERIMENT_URL)
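The motivation for replacing 'naive_test/' + file with osp.join: os.path resolves to the platform's own path module, so the same call yields the right separator everywhere. A quick illustration using the concrete modules:

    import ntpath      # what os.path is on Windows
    import posixpath   # what os.path is on Linux/macOS

    print(posixpath.join('naive_test', 'tuner_result.txt'))  # naive_test/tuner_result.txt
    print(ntpath.join('naive_test', 'tuner_result.txt'))     # naive_test\tuner_result.txt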
test/naive_test/local_win32.yml  (new file, mode 100644, +26 −0)

authorName: nni
experimentName: naive
trialConcurrency: 3
maxExecDuration: 1h
maxTrialNum: 10
#choice: local, remote
trainingServicePlatform: local
searchSpacePath: search_space.json
#choice: true, false
useAnnotation: false
tuner:
  codeDir: .
  classFileName: naive_tuner.py
  className: NaiveTuner
  classArgs:
    optimize_mode: maximize
assessor:
  codeDir: .
  classFileName: naive_assessor.py
  className: NaiveAssessor
  classArgs:
    optimize_mode: maximize
trial:
  command: python naive_trial.py
  codeDir: .
  gpuNum: 0
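Unlike the built-in tuners exercised by tuner_test below, this config wires in customized tuner/assessor classes (naive_tuner.py / naive_assessor.py). The real NaiveTuner lives in test/naive_test/; as a rough sketch of the shape such a class takes (method bodies here are illustrative, not the repo's code):

    from nni.tuner import Tuner

    class NaiveTuner(Tuner):
        def __init__(self, optimize_mode):
            self.cur = 0

        def update_search_space(self, search_space):
            # Receives the parsed contents of search_space.json.
            self.search_space = search_space

        def generate_parameters(self, parameter_id, **kwargs):
            # Produce the hyperparameters for the next trial.
            self.cur += 1
            return {'x': self.cur}

        def receive_trial_result(self, parameter_id, parameters, value, **kwargs):
            # `value` is the final metric the trial reported.
            pass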
test/tuner_test.py  (+11 −2)

@@ -18,6 +18,8 @@
 # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
+import sys
+import os.path as osp
 import subprocess
 import sys
 import time

@@ -30,9 +32,16 @@ TUNER_LIST = ['GridSearch', 'BatchTuner', 'TPE', 'Random', 'Anneal', 'Evolution'
 ASSESSOR_LIST = ['Medianstop']
 
+def get_config_file_path():
+    if sys.platform == 'win32':
+        config_file = osp.join('tuner_test', 'local_win32.yml')
+    else:
+        config_file = osp.join('tuner_test', 'local.yml')
+    return config_file
+
 def switch(dispatch_type, dispatch_name):
     '''Change dispatch in config.yml'''
-    config_path = 'tuner_test/local.yml'
+    config_path = get_config_file_path()
     experiment_config = get_yml_content(config_path)
     if dispatch_name in ['GridSearch', 'BatchTuner', 'Random']:
         experiment_config[dispatch_type.lower()] = {

@@ -56,7 +65,7 @@ def test_builtin_dispatcher(dispatch_type, dispatch_name):
     switch(dispatch_type, dispatch_name)
 
     print('Testing %s...' % dispatch_name)
-    proc = subprocess.run(['nnictl', 'create', '--config', 'tuner_test/local.yml'])
+    proc = subprocess.run(['nnictl', 'create', '--config', get_config_file_path()])
     assert proc.returncode == 0, '`nnictl create` failed with code %d' % proc.returncode
 
     nnimanager_log_path = get_nni_log_path(EXPERIMENT_URL)
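With switch() and get_config_file_path() in place, the per-tuner loop becomes platform-agnostic. Roughly, as a usage sketch assuming tuner_test.py's names are importable from the test directory:

    import subprocess
    from tuner_test import TUNER_LIST, switch, get_config_file_path

    for tuner in TUNER_LIST:
        switch('tuner', tuner)  # rewrite the YAML to use this built-in tuner
        proc = subprocess.run(['nnictl', 'create', '--config', get_config_file_path()])
        assert proc.returncode == 0, 'nnictl create failed for %s' % tuner
        subprocess.run(['nnictl', 'stop'])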
test/tuner_test/local_win32.yml  (new file, mode 100644, +20 −0)

assessor:
  builtinAssessorName: Medianstop
  classArgs:
    optimize_mode: maximize
authorName: nni
experimentName: test_sdk
maxExecDuration: 1h
maxTrialNum: 2
searchSpacePath: search_space.json
trainingServicePlatform: local
trial:
  codeDir: .
  command: python naive_trial.py
  gpuNum: 0
trialConcurrency: 2
tuner:
  builtinTunerName: Evolution
  classArgs:
    optimize_mode: maximize
useAnnotation: false
test/unittest.ps1  (+1 −1)

 $CWD = $PWD
+$ErrorActionPreference = "Stop"
 
 # -------------For python unittest-------------
 ## ------Run annotation test------
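Setting $ErrorActionPreference to "Stop" makes PowerShell promote otherwise non-terminating errors into terminating ones, so unittest.ps1 aborts at the first error instead of continuing past it, and the pipeline's 'unit test' step fails visibly.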
test/utils.py  (+9 −10)

@@ -20,7 +20,6 @@
 import contextlib
 import collections
-import json
 import os
 import socket
 import sys

@@ -29,7 +28,7 @@ import requests
 import time
 import ruamel.yaml as yaml
 
-EXPERIMENT_DONE_SIGNAL = '"Experiment done"'
+EXPERIMENT_DONE_SIGNAL = 'Experiment done'
 GREEN = '\33[32m'
 RED = '\33[31m'
@@ -93,13 +92,11 @@ def get_nni_log_path(experiment_url):
 def is_experiment_done(nnimanager_log_path):
     '''check if the experiment is done successfully'''
     assert os.path.exists(nnimanager_log_path), 'Experiment starts failed'
 
-    if sys.platform == "win32":
-        cmds = ['type', nnimanager_log_path, '|', 'find', EXPERIMENT_DONE_SIGNAL]
-    else:
-        cmds = ['cat', nnimanager_log_path, '|', 'grep', EXPERIMENT_DONE_SIGNAL]
-    completed_process = subprocess.run(' '.join(cmds), shell=True)
-    return completed_process.returncode == 0
+    with open(nnimanager_log_path, 'r') as f:
+        log_content = f.read()
+    return EXPERIMENT_DONE_SIGNAL in log_content
 
 def get_experiment_status(status_url):
     nni_status = requests.get(status_url).json()
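Dropping the shell pipeline is also why EXPERIMENT_DONE_SIGNAL loses its embedded double quotes: 'Experiment done' contains a space, so the old `cat … | grep …` / `type … | find …` command line needed explicit quoting, while a plain substring test does not. A small demonstration of the two styles (POSIX-only for the grep half; the log path and content are illustrative):

    import subprocess

    signal = 'Experiment done'
    log = 'nnimanager.log'  # illustrative path
    open(log, 'w').write('[INFO] Experiment done\n')  # stand-in log content

    # Old style: spawn a shell; the space in `signal` forces explicit quoting,
    # which is why the constant used to be '"Experiment done"'.
    old_found = subprocess.run('grep "%s" %s' % (signal, log), shell=True).returncode == 0

    # New style: plain substring test, no quoting, identical on Windows and POSIX.
    new_found = signal in open(log).read()
    assert old_found == new_found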
@@ -119,10 +116,12 @@ def print_stderr(trial_jobs_url):
     trial_jobs = requests.get(trial_jobs_url).json()
     for trial_job in trial_jobs:
         if trial_job['status'] == 'FAILED':
-            stderr_path = trial_job['stderrPath'].split(':')[-1]
             if sys.platform == "win32":
+                p = trial_job['stderrPath'].split(':')
+                stderr_path = ':'.join([p[-2], p[-1]])
                 subprocess.run(['type', stderr_path], shell=True)
             else:
+                stderr_path = trial_job['stderrPath'].split(':')[-1]
                 subprocess.run(['cat', stderr_path])
 
 def parse_max_duration_time(max_exec_duration):
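The new win32 branch exists because on Windows the file path inside stderrPath carries its own drive-letter colon, so taking split(':')[-1] would strip the C: prefix; rejoining the last two segments restores it. A standalone check (the sample value is illustrative only):

    # Why the Windows branch rejoins the last two segments: the drive-letter
    # colon would otherwise be split away.
    stderr_path_field = 'localhost:C:\\nni\\experiments\\trial\\stderr'

    p = stderr_path_field.split(':')
    posix_style = p[-1]                      # '\nni\experiments\trial\stderr' (drive lost)
    win32_style = ':'.join([p[-2], p[-1]])   # 'C:\nni\experiments\trial\stderr'
    print(posix_style)
    print(win32_style)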