tianlh / LightGBM-DCU · Commits · b78175b7

Unverified commit b78175b7, authored Sep 23, 2021 by Nikita Titov, committed by GitHub Sep 23, 2021

[python] add placeholders to titles in plotting functions (#4614)

parent b1c261a5

Showing 2 changed files with 77 additions and 12 deletions (+77, -12):

    python-package/lightgbm/plotting.py         (+10, -3)
    tests/python_package_test/test_plotting.py  (+67, -9)
python-package/lightgbm/plotting.py

@@ -62,6 +62,7 @@ def plot_importance(
     xlabel : str or None, optional (default="Feature importance")
         X-axis title label.
         If None, title is disabled.
+        @importance_type@ placeholder can be used, and it will be replaced with the value of ``importance_type`` parameter.
     ylabel : str or None, optional (default="Features")
         Y-axis title label.
         If None, title is disabled.
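The new placeholder is a label convenience. A minimal usage sketch, not part of the commit (assumes matplotlib is installed and uses a throwaway random dataset); note that in this commit plot_importance substitutes the placeholder in xlabel only, as the test changes further below pin down:

    import numpy as np
    import lightgbm as lgb

    X = np.random.rand(100, 4)
    y = np.random.randint(0, 2, size=100)
    bst = lgb.train({'objective': 'binary', 'verbose': -1},
                    lgb.Dataset(X, y), num_boost_round=5)

    # '@importance_type@' in xlabel is replaced with the importance_type argument,
    # so the X-axis here reads "Feature importance (split)".
    ax = lgb.plot_importance(bst, importance_type='split',
                             xlabel='Feature importance (@importance_type@)')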
@@ -150,6 +151,7 @@ def plot_importance(
     if title is not None:
         ax.set_title(title)
     if xlabel is not None:
+        xlabel = xlabel.replace('@importance_type@', importance_type)
         ax.set_xlabel(xlabel)
     if ylabel is not None:
         ax.set_ylabel(ylabel)
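The substitution itself is a plain str.replace, so labels that do not contain the placeholder pass through unchanged; a tiny illustration with hypothetical values, not from the commit:

    xlabel = 'x @importance_type@'
    print(xlabel.replace('@importance_type@', 'split'))         # -> x split
    print('plain label'.replace('@importance_type@', 'split'))  # -> plain label (no-op)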
@@ -318,6 +320,7 @@ def plot_metric(
         Y-axis title label.
         If 'auto', metric name is used.
         If None, title is disabled.
+        @metric@ placeholder can be used, and it will be replaced with metric name.
     figsize : tuple of 2 elements or None, optional (default=None)
         Figure size.
     dpi : int or None, optional (default=None)
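For plot_metric the same idea applies to the Y-axis label. A hedged end-to-end sketch under the same assumptions as the earlier example (toy data; the dataset name 'train' is invented for illustration):

    import numpy as np
    import lightgbm as lgb

    X = np.random.rand(100, 4)
    y = np.random.randint(0, 2, size=100)
    train_set = lgb.Dataset(X, y)
    evals_result = {}
    lgb.train({'objective': 'binary', 'metric': 'binary_logloss', 'verbose': -1},
              train_set, num_boost_round=5,
              valid_sets=[train_set], valid_names=['train'],
              evals_result=evals_result, verbose_eval=False)

    # '@metric@' in ylabel is replaced with the plotted metric's name,
    # so the Y-axis here reads 'Value of binary_logloss'.
    ax = lgb.plot_metric(evals_result, metric='binary_logloss',
                         ylabel='Value of @metric@')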
@@ -370,14 +373,17 @@ def plot_metric(
         if metric not in metrics_for_one:
             raise KeyError('No given metric in eval results.')
         results = metrics_for_one[metric]
-    num_iteration, max_result, min_result = len(results), max(results), min(results)
+    num_iteration = len(results)
+    max_result = max(results)
+    min_result = min(results)
     x_ = range(num_iteration)
     ax.plot(x_, results, label=name)
 
     for name in dataset_names:
         metrics_for_one = eval_results[name]
         results = metrics_for_one[metric]
-        max_result, min_result = max(max(results), max_result), min(min(results), min_result)
+        max_result = max(max(results), max_result)
+        min_result = min(min(results), min_result)
         ax.plot(x_, results, label=name)
 
     ax.legend(loc='best')
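For context, the loop above walks a nested mapping keyed first by dataset name and then by metric name; the refactor only unpacks the tuple assignments that track the global y-range into separate statements. A sketch of the structure and an equivalent range computation (dataset names and values hypothetical):

    # Shape of the eval results dict that plot_metric consumes:
    eval_results = {
        'v1': {'binary_logloss': [0.66, 0.62, 0.59]},
        'v2': {'binary_logloss': [0.67, 0.64, 0.61]},
    }

    # Global y-range across all plotted curves, matching the refactored logic:
    metric = 'binary_logloss'
    max_result = max(max(one[metric]) for one in eval_results.values())
    min_result = min(min(one[metric]) for one in eval_results.values())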
@@ -396,13 +402,14 @@ def plot_metric(
         ax.set_ylim(ylim)
 
     if ylabel == 'auto':
-        ylabel = metric
+        ylabel = '@metric@'
 
     if title is not None:
         ax.set_title(title)
     if xlabel is not None:
         ax.set_xlabel(xlabel)
     if ylabel is not None:
+        ylabel = ylabel.replace('@metric@', metric)
         ax.set_ylabel(ylabel)
     ax.grid(grid)
     return ax
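The 'auto' path now routes through the same substitution: 'auto' is first rewritten to the literal placeholder, which the later replace call resolves to the metric's name. Distilled into a standalone snippet with toy values:

    metric = 'binary_logloss'
    ylabel = 'auto'
    if ylabel == 'auto':
        ylabel = '@metric@'
    if ylabel is not None:
        ylabel = ylabel.replace('@metric@', metric)
    print(ylabel)  # -> binary_logloss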
tests/python_package_test/test_plotting.py

@@ -68,6 +68,13 @@ def test_plot_importance(params, breast_cancer_split, train_data):
     assert ax2.patches[2].get_facecolor() == (0, .5, 0, 1.)  # g
     assert ax2.patches[3].get_facecolor() == (0, 0, 1., 1.)  # b
 
+    ax3 = lgb.plot_importance(gbm0, title='t @importance_type@', xlabel='x @importance_type@', ylabel='y @importance_type@')
+    assert isinstance(ax3, matplotlib.axes.Axes)
+    assert ax3.get_title() == 't @importance_type@'
+    assert ax3.get_xlabel() == 'x split'
+    assert ax3.get_ylabel() == 'y @importance_type@'
+    assert len(ax3.patches) <= 30
+
     gbm2 = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, silent=True, importance_type="gain")
     gbm2.fit(X_train, y_train)
@@ -194,26 +201,77 @@ def test_plot_metrics(params, breast_cancer_split, train_data):
               num_boost_round=10,
               evals_result=evals_result0,
               verbose_eval=False)
-    ax0 = lgb.plot_metric(evals_result0)
+    with pytest.warns(UserWarning, match="More than one metric available, picking one to plot."):
+        ax0 = lgb.plot_metric(evals_result0)
     assert isinstance(ax0, matplotlib.axes.Axes)
     assert ax0.get_title() == 'Metric during training'
     assert ax0.get_xlabel() == 'Iterations'
     assert ax0.get_ylabel() in {'binary_logloss', 'binary_error'}
-    ax0 = lgb.plot_metric(evals_result0, metric='binary_error')
-    ax0 = lgb.plot_metric(evals_result0, metric='binary_logloss', dataset_names=['v2'])
+    legend_items = ax0.get_legend().get_texts()
+    assert len(legend_items) == 2
+    assert legend_items[0].get_text() == 'v1'
+    assert legend_items[1].get_text() == 'v2'
+
+    ax1 = lgb.plot_metric(evals_result0, metric='binary_error')
+    assert isinstance(ax1, matplotlib.axes.Axes)
+    assert ax1.get_title() == 'Metric during training'
+    assert ax1.get_xlabel() == 'Iterations'
+    assert ax1.get_ylabel() == 'binary_error'
+    legend_items = ax1.get_legend().get_texts()
+    assert len(legend_items) == 2
+    assert legend_items[0].get_text() == 'v1'
+    assert legend_items[1].get_text() == 'v2'
+
+    ax2 = lgb.plot_metric(evals_result0, metric='binary_logloss', dataset_names=['v2'])
+    assert isinstance(ax2, matplotlib.axes.Axes)
+    assert ax2.get_title() == 'Metric during training'
+    assert ax2.get_xlabel() == 'Iterations'
+    assert ax2.get_ylabel() == 'binary_logloss'
+    legend_items = ax2.get_legend().get_texts()
+    assert len(legend_items) == 1
+    assert legend_items[0].get_text() == 'v2'
+
+    ax3 = lgb.plot_metric(
+        evals_result0,
+        metric='binary_logloss',
+        dataset_names=['v1'],
+        title='Metric @metric@',
+        xlabel='Iterations @metric@',
+        ylabel='Value of "@metric@"',
+        figsize=(5, 5),
+        dpi=600,
+        grid=False
+    )
+    assert isinstance(ax3, matplotlib.axes.Axes)
+    assert ax3.get_title() == 'Metric @metric@'
+    assert ax3.get_xlabel() == 'Iterations @metric@'
+    assert ax3.get_ylabel() == 'Value of "binary_logloss"'
+    legend_items = ax3.get_legend().get_texts()
+    assert len(legend_items) == 1
+    assert legend_items[0].get_text() == 'v1'
+    assert ax3.get_figure().get_figheight() == 5
+    assert ax3.get_figure().get_figwidth() == 5
+    assert ax3.get_figure().get_dpi() == 600
+    for grid_line in ax3.get_xgridlines():
+        assert not grid_line.get_visible()
+    for grid_line in ax3.get_ygridlines():
+        assert not grid_line.get_visible()
 
     evals_result1 = {}
     lgb.train(params, train_data,
               num_boost_round=10,
               evals_result=evals_result1,
               verbose_eval=False)
-    with pytest.raises(ValueError):
+    with pytest.raises(ValueError, match="eval results cannot be empty."):
         lgb.plot_metric(evals_result1)
 
     gbm2 = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, silent=True)
     gbm2.fit(X_train, y_train, eval_set=[(X_test, y_test)], verbose=False)
-    ax2 = lgb.plot_metric(gbm2, title=None, xlabel=None, ylabel=None)
-    assert isinstance(ax2, matplotlib.axes.Axes)
-    assert ax2.get_title() == ''
-    assert ax2.get_xlabel() == ''
-    assert ax2.get_ylabel() == ''
+    ax4 = lgb.plot_metric(gbm2, title=None, xlabel=None, ylabel=None)
+    assert isinstance(ax4, matplotlib.axes.Axes)
+    assert ax4.get_title() == ''
+    assert ax4.get_xlabel() == ''
+    assert ax4.get_ylabel() == ''
+    legend_items = ax4.get_legend().get_texts()
+    assert len(legend_items) == 1
+    assert legend_items[0].get_text() == 'valid_0'
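The tightened pytest checks above lean on the `match` argument, which pytest applies with re.search against the string form of the raised error or warning. A minimal standalone sketch (hypothetical test, not from the commit):

    import pytest

    def empty_results_error():
        raise ValueError("eval results cannot be empty.")

    def test_empty_results_error():
        # `match` is a regex searched in str(exception); a plain substring works too.
        with pytest.raises(ValueError, match="eval results cannot be empty."):
            empty_results_error()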