Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
dlib
Commits
e88e166e
Commit
e88e166e
authored
Nov 29, 2019
by
Davis King
Browse files
slightly nicer default behavior
parent
f2cd9e3b
Changes
1
Show whitespace changes
Inline
Side-by-side
Showing
1 changed file
with
45 additions
and
18 deletions
+45
-18
dlib/global_optimization/find_max_global.h
dlib/global_optimization/find_max_global.h
+45
-18
No files found.
dlib/global_optimization/find_max_global.h
View file @
e88e166e
...
@@ -165,6 +165,8 @@ template <typename T> static auto go(T&& f, const matrix<double, 0, 1>& a) -> de
...
@@ -165,6 +165,8 @@ template <typename T> static auto go(T&& f, const matrix<double, 0, 1>& a) -> de
const
auto
time_to_stop
=
steady_clock
::
now
()
+
max_runtime
;
const
auto
time_to_stop
=
steady_clock
::
now
()
+
max_runtime
;
double
max_solver_overhead_time
=
0
;
// Now run the main solver loop.
// Now run the main solver loop.
for
(
size_t
i
=
0
;
i
<
num
.
max_calls
&&
steady_clock
::
now
()
<
time_to_stop
;
++
i
)
for
(
size_t
i
=
0
;
i
<
num
.
max_calls
&&
steady_clock
::
now
()
<
time_to_stop
;
++
i
)
{
{
...
@@ -193,6 +195,15 @@ template <typename T> static auto go(T&& f, const matrix<double, 0, 1>& a) -> de
...
@@ -193,6 +195,15 @@ template <typename T> static auto go(T&& f, const matrix<double, 0, 1>& a) -> de
tp
.
add_task_by_value
(
execute_call
);
tp
.
add_task_by_value
(
execute_call
);
std
::
lock_guard
<
std
::
mutex
>
lock
(
eval_time_mutex
);
std
::
lock_guard
<
std
::
mutex
>
lock
(
eval_time_mutex
);
const
double
obj_funct_time
=
objective_funct_eval_time
.
mean
()
/
std
::
max
(
1ul
,
tp
.
num_threads_in_pool
());
const
double
solver_overhead_time
=
duration_cast
<
nanoseconds
>
(
get_next_x_runtime
).
count
();
max_solver_overhead_time
=
std
::
max
(
max_solver_overhead_time
,
solver_overhead_time
);
// Don't start thinking about the logic below until we have at least 5 objective
// function samples for each objective function. This way we have a decent idea how
// fast these things are. The solver overhead is really small initially so none of
// the stuff below really matters in the beginning anyway.
if
(
objective_funct_eval_time
.
current_n
()
>
functions
.
size
()
*
5
)
{
// If calling opt.get_next_x() is taking a long time relative to how long it takes
// If calling opt.get_next_x() is taking a long time relative to how long it takes
// to evaluate the objective function then we should spend less time grinding on the
// to evaluate the objective function then we should spend less time grinding on the
// internal details of the optimizer and more time running the actual objective
// internal details of the optimizer and more time running the actual objective
...
@@ -202,16 +213,32 @@ template <typename T> static auto go(T&& f, const matrix<double, 0, 1>& a) -> de
...
@@ -202,16 +213,32 @@ template <typename T> static auto go(T&& f, const matrix<double, 0, 1>& a) -> de
// is really fast to evaluate. This is because the point of the Monte Carlo part is
// is really fast to evaluate. This is because the point of the Monte Carlo part is
// to try really hard to avoid calls to really expensive objective functions. But
// to try really hard to avoid calls to really expensive objective functions. But
// if the objective function is not expensive then we should just call it.
// if the objective function is not expensive then we should just call it.
if
(
objective_funct_eval_time
.
current_n
()
>
functions
.
size
()
*
5
&&
if
(
obj_funct_time
<
solver_overhead_time
)
objective_funct_eval_time
.
mean
()
/
std
::
max
(
1ul
,
tp
.
num_threads_in_pool
())
<
duration_cast
<
nanoseconds
>
(
get_next_x_runtime
).
count
())
{
{
// Reduce the amount of Monte Carlo sampling we do. If it goes low enough
// we will disable it altogether.
const
size_t
new_val
=
static_cast
<
size_t
>
(
std
::
floor
(
opt
.
get_monte_carlo_upper_bound_sample_num
()
*
0.8
));
const
size_t
new_val
=
static_cast
<
size_t
>
(
std
::
floor
(
opt
.
get_monte_carlo_upper_bound_sample_num
()
*
0.8
));
opt
.
set_monte_carlo_upper_bound_sample_num
(
std
::
max
<
size_t
>
(
1
,
new_val
));
opt
.
set_monte_carlo_upper_bound_sample_num
(
std
::
max
<
size_t
>
(
1
,
new_val
));
// At this point just disable the upper bounding Monte Carlo search stuff and
// At this point just disable the upper bounding Monte Carlo search stuff and
// use only pure random search since the objective function is super cheap to
// use only pure random search since the objective function is super cheap to
// evaluate, making this more fancy search a waste of time.
// evaluate, making this more fancy search a waste of time.
if
(
opt
.
get_monte_carlo_upper_bound_sample_num
()
==
1
)
{
if
(
opt
.
get_monte_carlo_upper_bound_sample_num
()
==
1
)
{
opt
.
set_pure_random_search_probability
(
1
);
opt
.
set_pure_random_search_probability
(
1
);
}
}
}
else
if
(
obj_funct_time
>
1.5
*
max_solver_overhead_time
)
// Consider re-enabling the Monte Carlo upper bounding search.
{
// The Monte Carlo overhead grows over time as the solver accumulates more
// information about the objective function. So we only want to reenable it
// or make it bigger if the objective function really is more expensive. So
// we compare to the max solver runtime we have seen so far. If the
// objective function has suddenly gotten more expensive then we start to
// turn the Monte Carlo modeling back on.
const
size_t
new_val
=
static_cast
<
size_t
>
(
std
::
ceil
(
opt
.
get_monte_carlo_upper_bound_sample_num
()
*
1.28
));
opt
.
set_monte_carlo_upper_bound_sample_num
(
std
::
min
<
size_t
>
(
5000
,
new_val
));
// Set this back to its default value.
opt
.
set_pure_random_search_probability
(
0.02
);
}
}
}
}
}
tp
.
wait_for_all_tasks
();
tp
.
wait_for_all_tasks
();
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment