Unverified Commit e5d61809 authored by Yuge Zhang's avatar Yuge Zhang Committed by GitHub
Browse files

Integrate coverage report into CI (#3854)

parent 51c6afde
......@@ -128,7 +128,7 @@ class FBNetTrainer(BaseTrainer):
layer_id = 0
for i, stage_name in enumerate(stages):
ops_names = [op for op in self.lookup_table.lut_ops[stage_name]]
for j in range(stage_lnum[i]):
for _ in range(stage_lnum[i]):
searched_op = ops_names[choice_ids[layer_id]]
choice_names.append(searched_op)
layer_id += 1
......
......@@ -3,7 +3,6 @@
from __future__ import absolute_import, division, print_function
import gc # noqa: F401
import os
import timeit
import torch
......@@ -159,7 +158,7 @@ def supernet_sample(model, state_dict, sampled_arch=[], lookup_table=None):
layer_id = 0
for i, stage in enumerate(stages):
ops_names = [op_name for op_name in lookup_table.lut_ops[stage]]
for j in range(stage_lnum[i]):
for _ in range(stage_lnum[i]):
searched_op = sampled_arch[layer_id]
op_i = ops_names.index(searched_op)
replace.append(
......
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# pylint: skip-file
import json
import logging
import os
......
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# pylint: skip-file
import tensorflow as tf
from tensorflow.keras.layers import Dense, Embedding, LSTMCell, RNN
from tensorflow.keras.losses import SparseCategoricalCrossentropy, Reduction
......
......@@ -12,7 +12,7 @@ from typing import Optional, Tuple
import colorama
import nni_node # pylint: disable=import-error
import nni_node # pylint: disable=wrong-import-order, import-error
import nni.runtime.protocol
from .config import ExperimentConfig
......
import logging
import sys
from datetime import datetime
from io import TextIOBase
import logging
from logging import FileHandler, Formatter, Handler, StreamHandler
from pathlib import Path
import sys
import time
from typing import Optional
import colorama
from .env_vars import dispatcher_env_vars, trial_env_vars
handlers = {}
log_format = '[%(asctime)s] %(levelname)s (%(name)s/%(threadName)s) %(message)s'
......
......@@ -6,11 +6,12 @@ from collections import defaultdict
import json_tricks
from nni import NoMoreTrialError
from .protocol import CommandType, send
from .msg_dispatcher_base import MsgDispatcherBase
from nni.assessor import AssessResult
from .common import multi_thread_enabled, multi_phase_enabled
from .env_vars import dispatcher_env_vars
from .msg_dispatcher_base import MsgDispatcherBase
from .protocol import CommandType, send
from ..utils import MetricType, to_json
_logger = logging.getLogger(__name__)
......
......@@ -7,8 +7,8 @@ import json
import time
import subprocess
from ..env_vars import trial_env_vars
from nni.utils import to_json
from ..env_vars import trial_env_vars
_sysdir = trial_env_vars.NNI_SYS_DIR
if not os.path.exists(os.path.join(_sysdir, '.nni')):
......
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import colorama
import logging
import warnings
import colorama
import json_tricks
__all__ = [
......
......@@ -14,7 +14,7 @@ from subprocess import Popen, check_call, CalledProcessError, PIPE, STDOUT
from nni.experiment.config import ExperimentConfig, convert
from nni.tools.annotation import expand_annotations, generate_search_space
from nni.tools.package_utils import get_builtin_module_class_name
import nni_node # pylint: disable=import-error
import nni_node # pylint: disable=import-error, wrong-import-order
from .launcher_utils import validate_all_content
from .rest_utils import rest_put, rest_post, check_rest_server, check_response
from .url_utils import cluster_metadata_url, experiment_url, get_local_urls, set_prefix_url
......
......@@ -13,7 +13,7 @@ import traceback
from datetime import datetime, timezone
from subprocess import Popen
from nni.tools.annotation import expand_annotations
import nni_node # pylint: disable=import-error
import nni_node # pylint: disable=wrong-import-order, import-error
from .rest_utils import rest_get, rest_delete, check_rest_server_quick, check_response
from .url_utils import trial_jobs_url, experiment_url, trial_job_id_url, export_data_url, metric_data_url
from .config_utils import Config, Experiments
......@@ -76,13 +76,16 @@ def check_experiment_id(args, update=True):
print_error('There are multiple experiments, please set the experiment id...')
experiment_information = ""
for key in running_experiment_list:
experiment_information += EXPERIMENT_DETAIL_FORMAT % (key,
experiments_dict[key].get('experimentName', 'N/A'),
experiments_dict[key]['status'],
experiments_dict[key].get('port', 'N/A'),
experiments_dict[key].get('platform'),
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(experiments_dict[key]['startTime'] / 1000)) if isinstance(experiments_dict[key]['startTime'], int) else experiments_dict[key]['startTime'],
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(experiments_dict[key]['endTime'] / 1000)) if isinstance(experiments_dict[key]['endTime'], int) else experiments_dict[key]['endTime'])
experiment_information += EXPERIMENT_DETAIL_FORMAT % (
key,
experiments_dict[key].get('experimentName', 'N/A'),
experiments_dict[key]['status'],
experiments_dict[key].get('port', 'N/A'),
experiments_dict[key].get('platform'),
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(experiments_dict[key]['startTime'] / 1000)) \
if isinstance(experiments_dict[key]['startTime'], int) else experiments_dict[key]['startTime'],
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(experiments_dict[key]['endTime'] / 1000)) \
if isinstance(experiments_dict[key]['endTime'], int) else experiments_dict[key]['endTime'])
print(EXPERIMENT_INFORMATION_FORMAT % experiment_information)
exit(1)
elif not running_experiment_list:
......@@ -136,13 +139,16 @@ def parse_ids(args):
print_error('There are multiple experiments, please set the experiment id...')
experiment_information = ""
for key in running_experiment_list:
experiment_information += EXPERIMENT_DETAIL_FORMAT % (key,
experiments_dict[key].get('experimentName', 'N/A'),
experiments_dict[key]['status'],
experiments_dict[key].get('port', 'N/A'),
experiments_dict[key].get('platform'),
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(experiments_dict[key]['startTime'] / 1000)) if isinstance(experiments_dict[key]['startTime'], int) else experiments_dict[key]['startTime'],
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(experiments_dict[key]['endTime'] / 1000)) if isinstance(experiments_dict[key]['endTime'], int) else experiments_dict[key]['endTime'])
experiment_information += EXPERIMENT_DETAIL_FORMAT % (
key,
experiments_dict[key].get('experimentName', 'N/A'),
experiments_dict[key]['status'],
experiments_dict[key].get('port', 'N/A'),
experiments_dict[key].get('platform'),
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(experiments_dict[key]['startTime'] / 1000)) \
if isinstance(experiments_dict[key]['startTime'], int) else experiments_dict[key]['startTime'],
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(experiments_dict[key]['endTime'] / 1000)) \
if isinstance(experiments_dict[key]['endTime'], int) else experiments_dict[key]['endTime'])
print(EXPERIMENT_INFORMATION_FORMAT % experiment_information)
exit(1)
else:
......@@ -615,13 +621,16 @@ def experiment_list(args):
print_warning('There is no experiment running...\nYou can use \'nnictl experiment list --all\' to list all experiments.')
experiment_information = ""
for key in experiment_id_list:
experiment_information += EXPERIMENT_DETAIL_FORMAT % (key,
experiments_dict[key].get('experimentName', 'N/A'),
experiments_dict[key]['status'],
experiments_dict[key].get('port', 'N/A'),
experiments_dict[key].get('platform'),
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(experiments_dict[key]['startTime'] / 1000)) if isinstance(experiments_dict[key]['startTime'], int) else experiments_dict[key]['startTime'],
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(experiments_dict[key]['endTime'] / 1000)) if isinstance(experiments_dict[key]['endTime'], int) else experiments_dict[key]['endTime'])
experiment_information += EXPERIMENT_DETAIL_FORMAT % (
key,
experiments_dict[key].get('experimentName', 'N/A'),
experiments_dict[key]['status'],
experiments_dict[key].get('port', 'N/A'),
experiments_dict[key].get('platform'),
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(experiments_dict[key]['startTime'] / 1000)) \
if isinstance(experiments_dict[key]['startTime'], int) else experiments_dict[key]['startTime'],
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(experiments_dict[key]['endTime'] / 1000)) \
if isinstance(experiments_dict[key]['endTime'], int) else experiments_dict[key]['endTime'])
print(EXPERIMENT_INFORMATION_FORMAT % experiment_information)
return experiment_id_list
......@@ -656,9 +665,12 @@ def show_experiment_info():
print_warning('There is no experiment running...')
return
for key in experiment_id_list:
print(EXPERIMENT_MONITOR_INFO % (key, experiments_dict[key]['status'], experiments_dict[key]['port'], \
experiments_dict[key].get('platform'), time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(experiments_dict[key]['startTime'] / 1000)) if isinstance(experiments_dict[key]['startTime'], int) else experiments_dict[key]['startTime'], \
get_time_interval(experiments_dict[key]['startTime'], experiments_dict[key]['endTime'])))
print(EXPERIMENT_MONITOR_INFO % (
key, experiments_dict[key]['status'], experiments_dict[key]['port'],
experiments_dict[key].get('platform'),
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(experiments_dict[key]['startTime'] / 1000)) \
if isinstance(experiments_dict[key]['startTime'], int) else experiments_dict[key]['startTime'],
get_time_interval(experiments_dict[key]['startTime'], experiments_dict[key]['endTime'])))
print(TRIAL_MONITOR_HEAD)
running, response = check_rest_server_quick(experiments_dict[key]['port'])
if running:
......
......@@ -182,12 +182,13 @@ stages:
- script: |
set -e
cd test
python -m pytest ut --ignore=ut/sdk/test_pruners.py \
python -m pytest ut --cov-config=.coveragerc \
--ignore=ut/sdk/test_pruners.py \
--ignore=ut/sdk/test_compressor_tf.py \
--ignore=ut/sdk/test_compressor_torch.py
python -m pytest ut/sdk/test_pruners.py
python -m pytest ut/sdk/test_compressor_tf.py
python -m pytest ut/sdk/test_compressor_torch.py
python -m pytest ut/sdk/test_pruners.py --cov-config=.coveragerc --cov-append
python -m pytest ut/sdk/test_compressor_tf.py --cov-config=.coveragerc --cov-append
python -m pytest ut/sdk/test_compressor_torch.py --cov-config=.coveragerc --cov-append
displayName: Python unit test
- script: |
......@@ -198,6 +199,19 @@ stages:
CI=true yarn test
displayName: TypeScript unit test
- task: PublishTestResults@2
condition: succeededOrFailed()
inputs:
testResultsFiles: '$(System.DefaultWorkingDirectory)/**/test-*.xml'
testRunTitle: 'Publish test results for Python $(python.version)'
displayName: Publish test results
- task: PublishCodeCoverageResults@1
inputs:
codeCoverageTool: Cobertura
summaryFileLocation: '$(System.DefaultWorkingDirectory)/**/*coverage.xml'
displayName: Publish code coverage results
- script: |
cd test
python nni_test/nnitest/run_tests.py --config config/pr_tests.yml
......
# .coveragerc to control coverage.py
[run]
branch = True
parallel = True
data_file = ${COVERAGE_DATA_FILE}
source = nni, nni.tools.cmd, nni.tools.trial_tool
concurrency = multiprocessing
......@@ -26,6 +23,3 @@ exclude_lines =
if __name__ == .__main__.:
ignore_errors = True
[html]
directory = ${COVERAGE_HTML_DIR}
[pytest]
addopts = --cov=nni --cov-config=.coveragerc --junitxml=junit/test-results.xml --cov-report=xml --cov-report=html --cov-config=.coveragerc
#Build result
# Build result
dist/
#node modules
# node modules
node_modules/
# test files
.experiment.test
......@@ -4,7 +4,7 @@
"main": "index.js",
"scripts": {
"build": "tsc",
"test": "nyc mocha -r ts-node/register -t 15000 --recursive **/*.test.ts --exclude node_modules/**/**/*.test.ts --colors",
"test": "nyc --reporter=cobertura --reporter=html --reporter=text --report-dir=./htmlcov mocha -r ts-node/register -t 15000 --recursive **/*.test.ts --exclude node_modules/**/**/*.test.ts --colors",
"start": "node dist/main.js",
"watch": "tsc --watch",
"eslint": "npx eslint ./ --ext .ts"
......
Markdown is supported
0% (attach files by dragging & dropping or selecting them.)
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment