Unverified commit 10c5abb2 authored by Malinda, committed by GitHub

Few optimizations with numpy (#4982)

parent 4e71ed62
@@ -117,7 +117,7 @@ def run_mlp(dataset, config, tuner, log):
     # Here score is the output of score() from the estimator
     cur_score = cross_val_score(cur_model, X_train, y_train)
-    cur_score = sum(cur_score) / float(len(cur_score))
+    cur_score = np.mean(cur_score)
     if np.isnan(cur_score):
         cur_score = 0
...
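A quick sanity check of the sum/len -> np.mean swap (an illustration, not part of the commit): cross_val_score returns a NumPy array, np.mean gives the same average, and it propagates NaN, so the np.isnan guard in the hunk above still fires.

    import numpy as np

    scores = np.array([0.8, 0.85, 0.9])               # stand-in for cross_val_score output
    assert np.isclose(np.mean(scores), sum(scores) / float(len(scores)))

    nan_scores = np.array([0.8, np.nan, 0.9])
    assert np.isnan(np.mean(nan_scores))              # NaN propagates, so the isnan guard still applies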
@@ -138,7 +138,7 @@ def run_random_forest(dataset, config, tuner, log):
     # Here score is the output of score() from the estimator
     cur_score = cross_val_score(cur_model, X_train, y_train)
-    cur_score = sum(cur_score) / float(len(cur_score))
+    cur_score = np.mean(cur_score)
     if np.isnan(cur_score):
         cur_score = 0
...
@@ -66,7 +66,7 @@ def compute_eval_metric(gt, predictions):
     thresholds = [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]
     ious = compute_ious(gt, predictions)
     precisions = [compute_precision_at(ious, th) for th in thresholds]
-    return sum(precisions) / len(precisions)
+    return np.mean(precisions)


 def intersection_over_union(y_true, y_pred):
...
@@ -259,14 +259,11 @@ class SimulatedAnnealingTaskGenerator(TaskGenerator):
         num_weights = sorted([self.weights_numel[op_name] for op_name in op_names])
         sparsity = sorted(random_sparsity)
-        total_weights = 0
-        total_weights_pruned = 0
         # calculate the scale
-        for idx, num_weight in enumerate(num_weights):
-            total_weights += num_weight
-            total_weights_pruned += int(num_weight * sparsity[idx])
+        total_weights = np.sum(num_weights)
+        total_weights_pruned = np.sum([int(num_weight * sparsity[idx]) for idx, num_weight in enumerate(num_weights)])
         if total_weights_pruned == 0:
             return None
...
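As a side note (not part of the commit), the list comprehension inside np.sum keeps the per-element int() truncation of the original loop. Assuming num_weights and sparsity are plain Python lists, a fully vectorized equivalent would look like this sketch:

    import numpy as np

    num_weights = [120, 300, 450]                     # hypothetical per-layer weight counts
    sparsity = [0.2, 0.5, 0.7]                        # hypothetical per-layer sparsities

    total_weights = np.sum(num_weights)
    total_weights_pruned = np.sum((np.asarray(num_weights) * np.asarray(sparsity)).astype(int))

    # same result as the original Python loop, element for element
    assert total_weights_pruned == sum(int(w * s) for w, s in zip(num_weights, sparsity))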
@@ -633,7 +633,7 @@ class PPOTuner(Tuner):
             # use mean of finished trials as the result of this failed trial
             values = [val for val in self.trials_result if val is not None]
             logger.warning('In trial_end, values: %s', values)
-            self.trials_result[trial_info_idx] = (sum(values) / len(values)) if values else 0
+            self.trials_result[trial_info_idx] = (np.mean(values)) if values else 0
         self.finished_trials += 1
         if self.finished_trials == self.inf_batch_size:
             logger.debug('Start next round inference in trial_end')
...
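One observation (not from the commit): the `if values else 0` guard is still needed after this switch, because np.mean([]) does not raise the way an unguarded sum([]) / len([]) would; it returns nan and emits a RuntimeWarning. A minimal check:

    import numpy as np

    values = []                                       # no finished trials yet
    result = np.mean(values) if values else 0         # guard keeps the fallback of 0
    assert result == 0

    # without the guard, np.mean would warn "Mean of empty slice" and return nan
    assert np.isnan(np.mean(np.array([], dtype=float)))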