"vscode:/vscode.git/clone" did not exist on "9c386db16625f763ad89f73a25b9aeb0e3cd7e7f"
Unverified Commit da3465cb authored by sayantan sadhu, committed by GitHub
Browse files

[python] improving the syntax of the f-strings in the file: tests/python_package_test/test_basic.py

[python] improving the syntax of the fstring in the file : tests/python_package_test/test_basic.py (#4312)
parent a372ed50
......@@ -159,7 +159,7 @@ def test_add_features_throws_if_datasets_unconstructed():
def test_add_features_equal_data_on_alternating_used_unused(tmp_path):
X = np.random.random((100, 5))
X[:, [1, 3]] = 0
names = ['col_%d' % i for i in range(5)]
names = [f'col_{i}' for i in range(5)]
for j in range(1, 5):
d1 = lgb.Dataset(X[:, :j], feature_name=names[:j]).construct()
d2 = lgb.Dataset(X[:, j:], feature_name=names[j:]).construct()
......@@ -179,7 +179,7 @@ def test_add_features_equal_data_on_alternating_used_unused(tmp_path):
def test_add_features_same_booster_behaviour(tmp_path):
X = np.random.random((100, 5))
X[:, [1, 3]] = 0
names = ['col_%d' % i for i in range(5)]
names = [f'col_{i}' for i in range(5)]
for j in range(1, 5):
d1 = lgb.Dataset(X[:, :j], feature_name=names[:j]).construct()
d2 = lgb.Dataset(X[:, j:], feature_name=names[j:]).construct()
......@@ -210,7 +210,7 @@ def test_add_features_from_different_sources():
n_col = 5
X = np.random.random((n_row, n_col))
xxs = [X, sparse.csr_matrix(X), pd.DataFrame(X)]
names = ['col_%d' % i for i in range(n_col)]
names = [f'col_{i}' for i in range(n_col)]
for x_1 in xxs:
# test that method works even with free_raw_data=True
d1 = lgb.Dataset(x_1, feature_name=names, free_raw_data=True).construct()
......@@ -234,7 +234,7 @@ def test_add_features_from_different_sources():
d1.add_features_from(d2)
assert isinstance(d1.get_data(), original_type)
assert d1.get_data().shape == (n_row, n_col * idx)
res_feature_names += ['D{}_{}'.format(idx, name) for name in names]
res_feature_names += [f'D{idx}_{name}' for name in names]
assert d1.feature_name == res_feature_names
......@@ -242,7 +242,7 @@ def test_cegb_affects_behavior(tmp_path):
X = np.random.random((100, 5))
X[:, [1, 3]] = 0
y = np.random.random(100)
names = ['col_%d' % i for i in range(5)]
names = [f'col_{i}' for i in range(5)]
ds = lgb.Dataset(X, feature_name=names).construct()
ds.set_label(y)
base = lgb.Booster(train_set=ds)
......@@ -271,7 +271,7 @@ def test_cegb_scaling_equalities(tmp_path):
X = np.random.random((100, 5))
X[:, [1, 3]] = 0
y = np.random.random(100)
names = ['col_%d' % i for i in range(5)]
names = [f'col_{i}' for i in range(5)]
ds = lgb.Dataset(X, feature_name=names).construct()
ds.set_label(y)
# Compare pairs of penalties, to ensure scaling works as intended
......@@ -324,7 +324,7 @@ def test_consistent_state_for_dataset_fields():
sequence = np.ones(y.shape[0])
sequence[0] = np.nan
sequence[1] = np.inf
feature_names = ['f{0}'.format(i) for i in range(X.shape[1])]
feature_names = [f'f{i}' for i in range(X.shape[1])]
lgb_data = lgb.Dataset(X, sequence,
weight=sequence, init_score=sequence,
feature_name=feature_names).construct()
......
Markdown is supported
0% loaded — or drag and drop files to attach.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment