Unverified Commit fedaa36d authored by yxy235's avatar yxy235 Committed by GitHub
Browse files

[Example] Fix numpy errors in examples. (#6554)


Co-authored-by: Ubuntu <ubuntu@ip-172-31-0-133.us-west-2.compute.internal>
parent eb434893
...@@ -89,7 +89,7 @@ for _, item in train_data.items(): ...@@ -89,7 +89,7 @@ for _, item in train_data.items():
for col, row in zip(cols, rows): for col, row in zip(cols, rows):
bg_matrix[gt_classes[col], gt_classes[row]] += 1 bg_matrix[gt_classes[col], gt_classes[row]] += 1
else: else:
all_possib = np.ones_like(iou_mat, dtype=np.bool) all_possib = np.ones_like(iou_mat, dtype=np.bool_)
np.fill_diagonal(all_possib, 0) np.fill_diagonal(all_possib, 0)
cols, rows = np.where(all_possib) cols, rows = np.where(all_possib)
for col, row in zip(cols, rows): for col, row in zip(cols, rows):
......
...@@ -11,7 +11,7 @@ def fit_logistic_regression(X, y, data_random_seed=1, repeat=1): ...@@ -11,7 +11,7 @@ def fit_logistic_regression(X, y, data_random_seed=1, repeat=1):
# transform targets to one-hot vector # transform targets to one-hot vector
one_hot_encoder = OneHotEncoder(categories="auto", sparse=False) one_hot_encoder = OneHotEncoder(categories="auto", sparse=False)
y = one_hot_encoder.fit_transform(y.reshape(-1, 1)).astype(np.bool) y = one_hot_encoder.fit_transform(y.reshape(-1, 1)).astype(np.bool_)
# normalize x # normalize x
X = normalize(X, norm="l2") X = normalize(X, norm="l2")
...@@ -42,7 +42,7 @@ def fit_logistic_regression(X, y, data_random_seed=1, repeat=1): ...@@ -42,7 +42,7 @@ def fit_logistic_regression(X, y, data_random_seed=1, repeat=1):
y_pred = clf.predict_proba(X_test) y_pred = clf.predict_proba(X_test)
y_pred = np.argmax(y_pred, axis=1) y_pred = np.argmax(y_pred, axis=1)
y_pred = one_hot_encoder.transform(y_pred.reshape(-1, 1)).astype( y_pred = one_hot_encoder.transform(y_pred.reshape(-1, 1)).astype(
np.bool np.bool_
) )
test_acc = metrics.accuracy_score(y_test, y_pred) test_acc = metrics.accuracy_score(y_test, y_pred)
...@@ -55,7 +55,7 @@ def fit_logistic_regression_preset_splits( ...@@ -55,7 +55,7 @@ def fit_logistic_regression_preset_splits(
): ):
# transform targets to one-hot vector # transform targets to one-hot vector
one_hot_encoder = OneHotEncoder(categories="auto", sparse=False) one_hot_encoder = OneHotEncoder(categories="auto", sparse=False)
y = one_hot_encoder.fit_transform(y.reshape(-1, 1)).astype(np.bool) y = one_hot_encoder.fit_transform(y.reshape(-1, 1)).astype(np.bool_)
# normalize x # normalize x
X = normalize(X, norm="l2") X = normalize(X, norm="l2")
...@@ -84,7 +84,7 @@ def fit_logistic_regression_preset_splits( ...@@ -84,7 +84,7 @@ def fit_logistic_regression_preset_splits(
y_pred = clf.predict_proba(X_val) y_pred = clf.predict_proba(X_val)
y_pred = np.argmax(y_pred, axis=1) y_pred = np.argmax(y_pred, axis=1)
y_pred = one_hot_encoder.transform(y_pred.reshape(-1, 1)).astype( y_pred = one_hot_encoder.transform(y_pred.reshape(-1, 1)).astype(
np.bool np.bool_
) )
val_acc = metrics.accuracy_score(y_val, y_pred) val_acc = metrics.accuracy_score(y_val, y_pred)
if val_acc > best_acc: if val_acc > best_acc:
...@@ -93,7 +93,7 @@ def fit_logistic_regression_preset_splits( ...@@ -93,7 +93,7 @@ def fit_logistic_regression_preset_splits(
y_pred = np.argmax(y_pred, axis=1) y_pred = np.argmax(y_pred, axis=1)
y_pred = one_hot_encoder.transform( y_pred = one_hot_encoder.transform(
y_pred.reshape(-1, 1) y_pred.reshape(-1, 1)
).astype(np.bool) ).astype(np.bool_)
best_test_acc = metrics.accuracy_score(y_test, y_pred) best_test_acc = metrics.accuracy_score(y_test, y_pred)
accuracies.append(best_test_acc) accuracies.append(best_test_acc)
......
...@@ -181,7 +181,7 @@ class QM9(QM9Dataset): ...@@ -181,7 +181,7 @@ class QM9(QM9Dataset):
dist = np.linalg.norm(R[:, None, :] - R[None, :, :], axis=-1) dist = np.linalg.norm(R[:, None, :] - R[None, :, :], axis=-1)
# keep all edges that don't exceed the cutoff and delete self-loops # keep all edges that don't exceed the cutoff and delete self-loops
adj = sp.csr_matrix(dist <= self.cutoff) - sp.eye( adj = sp.csr_matrix(dist <= self.cutoff) - sp.eye(
n_atoms, dtype=np.bool n_atoms, dtype=np.bool_
) )
adj = adj.tocoo() adj = adj.tocoo()
u, v = torch.tensor(adj.row), torch.tensor(adj.col) u, v = torch.tensor(adj.row), torch.tensor(adj.col)
......
...@@ -34,7 +34,7 @@ def repeat(n_times): ...@@ -34,7 +34,7 @@ def repeat(n_times):
def prob_to_one_hot(y_pred): def prob_to_one_hot(y_pred):
ret = np.zeros(y_pred.shape, np.bool) ret = np.zeros(y_pred.shape, np.bool_)
indices = np.argmax(y_pred, axis=1) indices = np.argmax(y_pred, axis=1)
for i in range(y_pred.shape[0]): for i in range(y_pred.shape[0]):
ret[i][indices[i]] = True ret[i][indices[i]] = True
...@@ -61,7 +61,7 @@ def label_classification( ...@@ -61,7 +61,7 @@ def label_classification(
Y = y.detach().cpu().numpy() Y = y.detach().cpu().numpy()
Y = Y.reshape(-1, 1) Y = Y.reshape(-1, 1)
onehot_encoder = OneHotEncoder(categories="auto").fit(Y) onehot_encoder = OneHotEncoder(categories="auto").fit(Y)
Y = onehot_encoder.transform(Y).toarray().astype(np.bool) Y = onehot_encoder.transform(Y).toarray().astype(np.bool_)
X = normalize(X, norm="l2") X = normalize(X, norm="l2")
......
...@@ -70,14 +70,14 @@ def load_data(args, multilabel): ...@@ -70,14 +70,14 @@ def load_data(args, multilabel):
DataType = namedtuple("Dataset", ["num_classes", "train_nid", "g"]) DataType = namedtuple("Dataset", ["num_classes", "train_nid", "g"])
adj_full = scipy.sparse.load_npz("./{}/adj_full.npz".format(prefix)).astype( adj_full = scipy.sparse.load_npz("./{}/adj_full.npz".format(prefix)).astype(
np.bool np.bool_
) )
g = dgl.from_scipy(adj_full) g = dgl.from_scipy(adj_full)
num_nodes = g.num_nodes() num_nodes = g.num_nodes()
adj_train = scipy.sparse.load_npz( adj_train = scipy.sparse.load_npz(
"./{}/adj_train.npz".format(prefix) "./{}/adj_train.npz".format(prefix)
).astype(np.bool) ).astype(np.bool_)
train_nid = np.array(list(set(adj_train.nonzero()[0]))) train_nid = np.array(list(set(adj_train.nonzero()[0])))
role = json.load(open("./{}/role.json".format(prefix))) role = json.load(open("./{}/role.json".format(prefix)))
......
...@@ -157,7 +157,7 @@ def HEM_one_level(rr, cc, vv, rid, weights): ...@@ -157,7 +157,7 @@ def HEM_one_level(rr, cc, vv, rid, weights):
nnz = rr.shape[0] nnz = rr.shape[0]
N = rr[nnz - 1] + 1 N = rr[nnz - 1] + 1
marked = np.zeros(N, np.bool) marked = np.zeros(N, np.bool_)
rowstart = np.zeros(N, np.int32) rowstart = np.zeros(N, np.int32)
rowlength = np.zeros(N, np.int32) rowlength = np.zeros(N, np.int32)
cluster_id = np.zeros(N, np.int32) cluster_id = np.zeros(N, np.int32)
......
...@@ -11,9 +11,9 @@ import tqdm ...@@ -11,9 +11,9 @@ import tqdm
# takes. It essentially follows the intuition of "training on the past and predict the future". # takes. It essentially follows the intuition of "training on the past and predict the future".
# One can also change the threshold to make validation and test set take larger proportions. # One can also change the threshold to make validation and test set take larger proportions.
def train_test_split_by_time(df, timestamp, user): def train_test_split_by_time(df, timestamp, user):
df["train_mask"] = np.ones((len(df),), dtype=np.bool) df["train_mask"] = np.ones((len(df),), dtype=np.bool_)
df["val_mask"] = np.zeros((len(df),), dtype=np.bool) df["val_mask"] = np.zeros((len(df),), dtype=np.bool_)
df["test_mask"] = np.zeros((len(df),), dtype=np.bool) df["test_mask"] = np.zeros((len(df),), dtype=np.bool_)
df = dd.from_pandas(df, npartitions=10) df = dd.from_pandas(df, npartitions=10)
def train_test_split(df): def train_test_split(df):
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment