validators.py
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

import os.path as osp
import json
import requests
import nnicli as nc
from utils import METRICS_URL


class ITValidator:
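    """Base class for validators; subclasses implement __call__."""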
    def __call__(self, rest_endpoint, experiment_dir, nni_source_dir, **kwargs):
        pass


class MetricsValidator(ITValidator):
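    """Validate that the metrics reported by trial jobs match the expected metrics file."""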
    def __call__(self, rest_endpoint, experiment_dir, nni_source_dir, **kwargs):
        self.check_metrics(nni_source_dir, **kwargs)

    def check_metrics(self, nni_source_dir, **kwargs):
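        """Fetch metrics from METRICS_URL and compare each trial's intermediate and
        final results with test/config/metrics_test/<expected_result_file>.
        """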
        expected_result_file = kwargs.get('expected_result_file', 'expected_metrics.json')
        with open(osp.join(nni_source_dir, 'test', 'config', 'metrics_test', expected_result_file), 'r') as f:
            expected_metrics = json.load(f)
        print('expected metrics:', expected_metrics)
        metrics = requests.get(METRICS_URL).json()
        print('RAW METRICS:', json.dumps(metrics, indent=4))
        intermediate_result, final_result = self.get_metric_results(metrics)

        assert intermediate_result and final_result
        for trialjob_id in intermediate_result:
            trial_final_result = final_result[trialjob_id]
            trial_intermediate_result = intermediate_result[trialjob_id]
            print('intermediate result:', trial_intermediate_result)
            print('final result:', trial_final_result)
            assert len(trial_final_result) == 1, 'there should be 1 final result'
            assert trial_final_result[0] == expected_metrics['final_result']
            # encode dicts/numbers as JSON strings so they can be compared as sets
            assert {json.dumps(x, sort_keys=True) for x in trial_intermediate_result} \
                == {json.dumps(x, sort_keys=True) for x in expected_metrics['intermediate_result']}

    def get_metric_results(self, metrics):
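        """Group raw metric records by trial job id, split into intermediate
        ('PERIODICAL') and final ('FINAL') results.
        """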
        intermediate_result = {}
        final_result = {}
        for metric in metrics:
            # metric values are encoded by the NNI SDK as JSON strings,
            # so decode twice with json.loads to recover the original value
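            # (illustrative example, not real data: '"0.5"' decodes to '0.5', then to 0.5;
            # a dict metric decodes to e.g. {'default': 0.5})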
            metric_value = json.loads(json.loads(metric['data']))
            if metric['type'] == 'PERIODICAL':
                intermediate_result.setdefault(metric['trialJobId'], []).append(metric_value)
            elif metric['type'] == 'FINAL':
                final_result.setdefault(metric['trialJobId'], []).append(metric_value)
        return intermediate_result, final_result


class NnicliValidator(ITValidator):
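    """Exercise the nnicli query API against the experiment's REST endpoint."""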
    def __call__(self, rest_endpoint, experiment_dir, nni_source_dir, **kwargs):
        print(rest_endpoint)
        nc.set_endpoint(rest_endpoint)
        #print(nc.version())
        print(nc.get_job_statistics())
        print(nc.get_experiment_status())
        print(nc.list_trial_jobs())
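

# Illustrative usage (sketch only; in the real integration tests the harness supplies
# the endpoint and directories, so the values below are placeholders):
#
#   MetricsValidator()('http://localhost:8080', '<experiment_dir>', '<nni_source_dir>',
#                      expected_result_file='expected_metrics.json')
#   NnicliValidator()('http://localhost:8080', '<experiment_dir>', '<nni_source_dir>')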