Unverified commit 3bcacc62 authored by Rhett Ying, committed by GitHub

[doc] shorten notebook outputs (#6831)

parent 5a24d02d
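
Every cell touched by this diff follows the same generate → preview → save pattern, and the commit only shrinks the preview slice that gets printed (from the first 10 rows to the first 5 or 3). A minimal sketch of that pattern, with an assumed base_dir and illustrative sizes rather than the notebook's actual setup cell:

import os
import numpy as np
import torch

base_dir = "./synthetic_data"  # assumed output directory for this sketch
os.makedirs(base_dir, exist_ok=True)
num_nodes = 1000  # illustrative size

# Generate a synthetic node feature, print only a short preview, then save it.
feat_path = os.path.join(base_dir, "node-feat-0.npy")
feat = np.random.rand(num_nodes, 5)
print(f"Part of node feature [feat_0]: {feat[:3, :]}")  # previously feat[:10, :]
np.save(feat_path, feat)

# Same pattern for a feature stored as a torch tensor.
feat_pt_path = os.path.join(base_dir, "node-feat-1.pt")
feat_pt = torch.rand(num_nodes, 5)
print(f"Part of node feature [feat_1]: {feat_pt[:3, :]}")  # previously feat_pt[:10, :]
torch.save(feat_pt, feat_pt_path)
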
@@ -128,7 +128,7 @@
 "# Edge type: \"user:like:item\"\n",
 "like_edges_path = os.path.join(base_dir, \"like-edges.csv\")\n",
 "like_edges = np.random.randint(0, num_nodes, size=(num_edges, 2))\n",
-"print(f\"Part of [user:like:item] edges: {like_edges[:10, :]}\\n\")\n",
+"print(f\"Part of [user:like:item] edges: {like_edges[:5, :]}\\n\")\n",
 "\n",
 "df = pd.DataFrame(like_edges)\n",
 "df.to_csv(like_edges_path, index=False, header=False)\n",
@@ -137,7 +137,7 @@
 "# Edge type: \"user:follow:user\"\n",
 "follow_edges_path = os.path.join(base_dir, \"follow-edges.csv\")\n",
 "follow_edges = np.random.randint(0, num_nodes, size=(num_edges, 2))\n",
-"print(f\"Part of [user:follow:user] edges: {follow_edges[:10, :]}\\n\")\n",
+"print(f\"Part of [user:follow:user] edges: {follow_edges[:5, :]}\\n\")\n",
 "\n",
 "df = pd.DataFrame(follow_edges)\n",
 "df.to_csv(follow_edges_path, index=False, header=False)\n",
@@ -165,56 +165,56 @@
 "# Generate node[user] feature in numpy array.\n",
 "node_user_feat_0_path = os.path.join(base_dir, \"node-user-feat-0.npy\")\n",
 "node_user_feat_0 = np.random.rand(num_nodes, 5)\n",
-"print(f\"Part of node[user] feature [feat_0]: {node_user_feat_0[:10, :]}\")\n",
+"print(f\"Part of node[user] feature [feat_0]: {node_user_feat_0[:3, :]}\")\n",
 "np.save(node_user_feat_0_path, node_user_feat_0)\n",
 "print(f\"Node[user] feature [feat_0] is saved to {node_user_feat_0_path}\\n\")\n",
 "\n",
 "# Generate another node[user] feature in torch tensor\n",
 "node_user_feat_1_path = os.path.join(base_dir, \"node-user-feat-1.pt\")\n",
 "node_user_feat_1 = torch.rand(num_nodes, 5)\n",
-"print(f\"Part of node[user] feature [feat_1]: {node_user_feat_1[:10, :]}\")\n",
+"print(f\"Part of node[user] feature [feat_1]: {node_user_feat_1[:3, :]}\")\n",
 "torch.save(node_user_feat_1, node_user_feat_1_path)\n",
 "print(f\"Node[user] feature [feat_1] is saved to {node_user_feat_1_path}\\n\")\n",
 "\n",
 "# Generate node[item] feature in numpy array.\n",
 "node_item_feat_0_path = os.path.join(base_dir, \"node-item-feat-0.npy\")\n",
 "node_item_feat_0 = np.random.rand(num_nodes, 5)\n",
-"print(f\"Part of node[item] feature [feat_0]: {node_item_feat_0[:10, :]}\")\n",
+"print(f\"Part of node[item] feature [feat_0]: {node_item_feat_0[:3, :]}\")\n",
 "np.save(node_item_feat_0_path, node_item_feat_0)\n",
 "print(f\"Node[item] feature [feat_0] is saved to {node_item_feat_0_path}\\n\")\n",
 "\n",
 "# Generate another node[item] feature in torch tensor\n",
 "node_item_feat_1_path = os.path.join(base_dir, \"node-item-feat-1.pt\")\n",
 "node_item_feat_1 = torch.rand(num_nodes, 5)\n",
-"print(f\"Part of node[item] feature [feat_1]: {node_item_feat_1[:10, :]}\")\n",
+"print(f\"Part of node[item] feature [feat_1]: {node_item_feat_1[:3, :]}\")\n",
 "torch.save(node_item_feat_1, node_item_feat_1_path)\n",
 "print(f\"Node[item] feature [feat_1] is saved to {node_item_feat_1_path}\\n\")\n",
 "\n",
 "# Generate edge[user:like:item] feature in numpy array.\n",
 "edge_like_feat_0_path = os.path.join(base_dir, \"edge-like-feat-0.npy\")\n",
 "edge_like_feat_0 = np.random.rand(num_edges, 5)\n",
-"print(f\"Part of edge[user:like:item] feature [feat_0]: {edge_like_feat_0[:10, :]}\")\n",
+"print(f\"Part of edge[user:like:item] feature [feat_0]: {edge_like_feat_0[:3, :]}\")\n",
 "np.save(edge_like_feat_0_path, edge_like_feat_0)\n",
 "print(f\"Edge[user:like:item] feature [feat_0] is saved to {edge_like_feat_0_path}\\n\")\n",
 "\n",
 "# Generate another edge[user:like:item] feature in torch tensor\n",
 "edge_like_feat_1_path = os.path.join(base_dir, \"edge-like-feat-1.pt\")\n",
 "edge_like_feat_1 = torch.rand(num_edges, 5)\n",
-"print(f\"Part of edge[user:like:item] feature [feat_1]: {edge_like_feat_1[:10, :]}\")\n",
+"print(f\"Part of edge[user:like:item] feature [feat_1]: {edge_like_feat_1[:3, :]}\")\n",
 "torch.save(edge_like_feat_1, edge_like_feat_1_path)\n",
 "print(f\"Edge[user:like:item] feature [feat_1] is saved to {edge_like_feat_1_path}\\n\")\n",
 "\n",
 "# Generate edge[user:follow:user] feature in numpy array.\n",
 "edge_follow_feat_0_path = os.path.join(base_dir, \"edge-follow-feat-0.npy\")\n",
 "edge_follow_feat_0 = np.random.rand(num_edges, 5)\n",
-"print(f\"Part of edge[user:follow:user] feature [feat_0]: {edge_follow_feat_0[:10, :]}\")\n",
+"print(f\"Part of edge[user:follow:user] feature [feat_0]: {edge_follow_feat_0[:3, :]}\")\n",
 "np.save(edge_follow_feat_0_path, edge_follow_feat_0)\n",
 "print(f\"Edge[user:follow:user] feature [feat_0] is saved to {edge_follow_feat_0_path}\\n\")\n",
 "\n",
 "# Generate another edge[user:follow:user] feature in torch tensor\n",
 "edge_follow_feat_1_path = os.path.join(base_dir, \"edge-follow-feat-1.pt\")\n",
 "edge_follow_feat_1 = torch.rand(num_edges, 5)\n",
-"print(f\"Part of edge[user:follow:user] feature [feat_1]: {edge_follow_feat_1[:10, :]}\")\n",
+"print(f\"Part of edge[user:follow:user] feature [feat_1]: {edge_follow_feat_1[:3, :]}\")\n",
 "torch.save(edge_follow_feat_1, edge_follow_feat_1_path)\n",
 "print(f\"Edge[user:follow:user] feature [feat_1] is saved to {edge_follow_feat_1_path}\\n\")"
 ],
@@ -261,84 +261,84 @@
 "# Train IDs for user.\n",
 "nc_train_user_ids_path = os.path.join(base_dir, \"nc-train-user-ids.npy\")\n",
 "nc_train_user_ids = user_ids[:num_trains]\n",
-"print(f\"Part of train ids[user] for node classification: {nc_train_user_ids[:10]}\")\n",
+"print(f\"Part of train ids[user] for node classification: {nc_train_user_ids[:3]}\")\n",
 "np.save(nc_train_user_ids_path, nc_train_user_ids)\n",
 "print(f\"NC train ids[user] are saved to {nc_train_user_ids_path}\\n\")\n",
 "\n",
 "# Train labels for user.\n",
 "nc_train_user_labels_path = os.path.join(base_dir, \"nc-train-user-labels.pt\")\n",
 "nc_train_user_labels = torch.randint(0, 10, (num_trains,))\n",
-"print(f\"Part of train labels[user] for node classification: {nc_train_user_labels[:10]}\")\n",
+"print(f\"Part of train labels[user] for node classification: {nc_train_user_labels[:3]}\")\n",
 "torch.save(nc_train_user_labels, nc_train_user_labels_path)\n",
 "print(f\"NC train labels[user] are saved to {nc_train_user_labels_path}\\n\")\n",
 "\n",
 "# Train IDs for item.\n",
 "nc_train_item_ids_path = os.path.join(base_dir, \"nc-train-item-ids.npy\")\n",
 "nc_train_item_ids = item_ids[:num_trains]\n",
-"print(f\"Part of train ids[item] for node classification: {nc_train_item_ids[:10]}\")\n",
+"print(f\"Part of train ids[item] for node classification: {nc_train_item_ids[:3]}\")\n",
 "np.save(nc_train_item_ids_path, nc_train_item_ids)\n",
 "print(f\"NC train ids[item] are saved to {nc_train_item_ids_path}\\n\")\n",
 "\n",
 "# Train labels for item.\n",
 "nc_train_item_labels_path = os.path.join(base_dir, \"nc-train-item-labels.pt\")\n",
 "nc_train_item_labels = torch.randint(0, 10, (num_trains,))\n",
-"print(f\"Part of train labels[item] for node classification: {nc_train_item_labels[:10]}\")\n",
+"print(f\"Part of train labels[item] for node classification: {nc_train_item_labels[:3]}\")\n",
 "torch.save(nc_train_item_labels, nc_train_item_labels_path)\n",
 "print(f\"NC train labels[item] are saved to {nc_train_item_labels_path}\\n\")\n",
 "\n",
 "# Val IDs for user.\n",
 "nc_val_user_ids_path = os.path.join(base_dir, \"nc-val-user-ids.npy\")\n",
 "nc_val_user_ids = user_ids[num_trains:num_trains+num_vals]\n",
-"print(f\"Part of val ids[user] for node classification: {nc_val_user_ids[:10]}\")\n",
+"print(f\"Part of val ids[user] for node classification: {nc_val_user_ids[:3]}\")\n",
 "np.save(nc_val_user_ids_path, nc_val_user_ids)\n",
 "print(f\"NC val ids[user] are saved to {nc_val_user_ids_path}\\n\")\n",
 "\n",
 "# Val labels for user.\n",
 "nc_val_user_labels_path = os.path.join(base_dir, \"nc-val-user-labels.pt\")\n",
 "nc_val_user_labels = torch.randint(0, 10, (num_vals,))\n",
-"print(f\"Part of val labels[user] for node classification: {nc_val_user_labels[:10]}\")\n",
+"print(f\"Part of val labels[user] for node classification: {nc_val_user_labels[:3]}\")\n",
 "torch.save(nc_val_user_labels, nc_val_user_labels_path)\n",
 "print(f\"NC val labels[user] are saved to {nc_val_user_labels_path}\\n\")\n",
 "\n",
 "# Val IDs for item.\n",
 "nc_val_item_ids_path = os.path.join(base_dir, \"nc-val-item-ids.npy\")\n",
 "nc_val_item_ids = item_ids[num_trains:num_trains+num_vals]\n",
-"print(f\"Part of val ids[item] for node classification: {nc_val_item_ids[:10]}\")\n",
+"print(f\"Part of val ids[item] for node classification: {nc_val_item_ids[:3]}\")\n",
 "np.save(nc_val_item_ids_path, nc_val_item_ids)\n",
 "print(f\"NC val ids[item] are saved to {nc_val_item_ids_path}\\n\")\n",
 "\n",
 "# Val labels for item.\n",
 "nc_val_item_labels_path = os.path.join(base_dir, \"nc-val-item-labels.pt\")\n",
 "nc_val_item_labels = torch.randint(0, 10, (num_vals,))\n",
-"print(f\"Part of val labels[item] for node classification: {nc_val_item_labels[:10]}\")\n",
+"print(f\"Part of val labels[item] for node classification: {nc_val_item_labels[:3]}\")\n",
 "torch.save(nc_val_item_labels, nc_val_item_labels_path)\n",
 "print(f\"NC val labels[item] are saved to {nc_val_item_labels_path}\\n\")\n",
 "\n",
 "# Test IDs for user.\n",
 "nc_test_user_ids_path = os.path.join(base_dir, \"nc-test-user-ids.npy\")\n",
 "nc_test_user_ids = user_ids[-num_tests:]\n",
-"print(f\"Part of test ids[user] for node classification: {nc_test_user_ids[:10]}\")\n",
+"print(f\"Part of test ids[user] for node classification: {nc_test_user_ids[:3]}\")\n",
 "np.save(nc_test_user_ids_path, nc_test_user_ids)\n",
 "print(f\"NC test ids[user] are saved to {nc_test_user_ids_path}\\n\")\n",
 "\n",
 "# Test labels for user.\n",
 "nc_test_user_labels_path = os.path.join(base_dir, \"nc-test-user-labels.pt\")\n",
 "nc_test_user_labels = torch.randint(0, 10, (num_tests,))\n",
-"print(f\"Part of test labels[user] for node classification: {nc_test_user_labels[:10]}\")\n",
+"print(f\"Part of test labels[user] for node classification: {nc_test_user_labels[:3]}\")\n",
 "torch.save(nc_test_user_labels, nc_test_user_labels_path)\n",
 "print(f\"NC test labels[user] are saved to {nc_test_user_labels_path}\\n\")\n",
 "\n",
 "# Test IDs for item.\n",
 "nc_test_item_ids_path = os.path.join(base_dir, \"nc-test-item-ids.npy\")\n",
 "nc_test_item_ids = item_ids[-num_tests:]\n",
-"print(f\"Part of test ids[item] for node classification: {nc_test_item_ids[:10]}\")\n",
+"print(f\"Part of test ids[item] for node classification: {nc_test_item_ids[:3]}\")\n",
 "np.save(nc_test_item_ids_path, nc_test_item_ids)\n",
 "print(f\"NC test ids[item] are saved to {nc_test_item_ids_path}\\n\")\n",
 "\n",
 "# Test labels for item.\n",
 "nc_test_item_labels_path = os.path.join(base_dir, \"nc-test-item-labels.pt\")\n",
 "nc_test_item_labels = torch.randint(0, 10, (num_tests,))\n",
-"print(f\"Part of test labels[item] for node classification: {nc_test_item_labels[:10]}\")\n",
+"print(f\"Part of test labels[item] for node classification: {nc_test_item_labels[:3]}\")\n",
 "torch.save(nc_test_item_labels, nc_test_item_labels_path)\n",
 "print(f\"NC test labels[item] are saved to {nc_test_item_labels_path}\\n\")"
 ],
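
The hunk above shortens the previews in the node-classification itemset cell; the underlying pattern is a train/val/test slice over node IDs plus randomly generated labels. A rough sketch of that split, where base_dir, user_ids, num_trains, num_vals, and num_tests are illustrative stand-ins for the notebook's own variables:

import os
import numpy as np
import torch

base_dir = "./synthetic_data"  # assumed output directory
os.makedirs(base_dir, exist_ok=True)
num_nodes = 1000                                  # illustrative
num_trains, num_vals, num_tests = 600, 200, 200   # illustrative split sizes

# Shuffled node IDs, as the notebook's earlier cells would provide.
user_ids = np.random.permutation(num_nodes)

# Train IDs: the first num_trains shuffled IDs, previewed briefly and saved.
train_ids = user_ids[:num_trains]
print(f"Part of train ids: {train_ids[:3]}")  # previously the first 10 were printed
np.save(os.path.join(base_dir, "nc-train-user-ids.npy"), train_ids)

# Train labels: random integers in [0, 10), saved as a torch tensor.
train_labels = torch.randint(0, 10, (num_trains,))
print(f"Part of train labels: {train_labels[:3]}")
torch.save(train_labels, os.path.join(base_dir, "nc-train-user-labels.pt"))

# Val and test sets use the same slicing:
# user_ids[num_trains:num_trains + num_vals] and user_ids[-num_tests:].
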
@@ -369,70 +369,70 @@
 "# Train node pairs for user:like:item.\n",
 "lp_train_like_node_pairs_path = os.path.join(base_dir, \"lp-train-like-node-pairs.npy\")\n",
 "lp_train_like_node_pairs = like_edges[:num_trains, :]\n",
-"print(f\"Part of train node pairs[user:like:item] for link prediction: {lp_train_like_node_pairs[:10]}\")\n",
+"print(f\"Part of train node pairs[user:like:item] for link prediction: {lp_train_like_node_pairs[:3]}\")\n",
 "np.save(lp_train_like_node_pairs_path, lp_train_like_node_pairs)\n",
 "print(f\"LP train node pairs[user:like:item] are saved to {lp_train_like_node_pairs_path}\\n\")\n",
 "\n",
 "# Train node pairs for user:follow:user.\n",
 "lp_train_follow_node_pairs_path = os.path.join(base_dir, \"lp-train-follow-node-pairs.npy\")\n",
 "lp_train_follow_node_pairs = follow_edges[:num_trains, :]\n",
-"print(f\"Part of train node pairs[user:follow:user] for link prediction: {lp_train_follow_node_pairs[:10]}\")\n",
+"print(f\"Part of train node pairs[user:follow:user] for link prediction: {lp_train_follow_node_pairs[:3]}\")\n",
 "np.save(lp_train_follow_node_pairs_path, lp_train_follow_node_pairs)\n",
 "print(f\"LP train node pairs[user:follow:user] are saved to {lp_train_follow_node_pairs_path}\\n\")\n",
 "\n",
 "# Val node pairs for user:like:item.\n",
 "lp_val_like_node_pairs_path = os.path.join(base_dir, \"lp-val-like-node-pairs.npy\")\n",
 "lp_val_like_node_pairs = like_edges[num_trains:num_trains+num_vals, :]\n",
-"print(f\"Part of val node pairs[user:like:item] for link prediction: {lp_val_like_node_pairs[:10]}\")\n",
+"print(f\"Part of val node pairs[user:like:item] for link prediction: {lp_val_like_node_pairs[:3]}\")\n",
 "np.save(lp_val_like_node_pairs_path, lp_val_like_node_pairs)\n",
 "print(f\"LP val node pairs[user:like:item] are saved to {lp_val_like_node_pairs_path}\\n\")\n",
 "\n",
 "# Val negative dsts for user:like:item.\n",
 "lp_val_like_neg_dsts_path = os.path.join(base_dir, \"lp-val-like-neg-dsts.pt\")\n",
 "lp_val_like_neg_dsts = torch.randint(0, num_nodes, (num_vals, 10))\n",
-"print(f\"Part of val negative dsts[user:like:item] for link prediction: {lp_val_like_neg_dsts[:10]}\")\n",
+"print(f\"Part of val negative dsts[user:like:item] for link prediction: {lp_val_like_neg_dsts[:3]}\")\n",
 "torch.save(lp_val_like_neg_dsts, lp_val_like_neg_dsts_path)\n",
 "print(f\"LP val negative dsts[user:like:item] are saved to {lp_val_like_neg_dsts_path}\\n\")\n",
 "\n",
 "# Val node pairs for user:follow:user.\n",
 "lp_val_follow_node_pairs_path = os.path.join(base_dir, \"lp-val-follow-node-pairs.npy\")\n",
 "lp_val_follow_node_pairs = follow_edges[num_trains:num_trains+num_vals, :]\n",
-"print(f\"Part of val node pairs[user:follow:user] for link prediction: {lp_val_follow_node_pairs[:10]}\")\n",
+"print(f\"Part of val node pairs[user:follow:user] for link prediction: {lp_val_follow_node_pairs[:3]}\")\n",
 "np.save(lp_val_follow_node_pairs_path, lp_val_follow_node_pairs)\n",
 "print(f\"LP val node pairs[user:follow:user] are saved to {lp_val_follow_node_pairs_path}\\n\")\n",
 "\n",
 "# Val negative dsts for user:follow:user.\n",
 "lp_val_follow_neg_dsts_path = os.path.join(base_dir, \"lp-val-follow-neg-dsts.pt\")\n",
 "lp_val_follow_neg_dsts = torch.randint(0, num_nodes, (num_vals, 10))\n",
-"print(f\"Part of val negative dsts[user:follow:user] for link prediction: {lp_val_follow_neg_dsts[:10]}\")\n",
+"print(f\"Part of val negative dsts[user:follow:user] for link prediction: {lp_val_follow_neg_dsts[:3]}\")\n",
 "torch.save(lp_val_follow_neg_dsts, lp_val_follow_neg_dsts_path)\n",
 "print(f\"LP val negative dsts[user:follow:user] are saved to {lp_val_follow_neg_dsts_path}\\n\")\n",
 "\n",
 "# Test node paris for user:like:item.\n",
 "lp_test_like_node_pairs_path = os.path.join(base_dir, \"lp-test-like-node-pairs.npy\")\n",
 "lp_test_like_node_pairs = like_edges[-num_tests, :]\n",
-"print(f\"Part of test node pairs[user:like:item] for link prediction: {lp_test_like_node_pairs[:10]}\")\n",
+"print(f\"Part of test node pairs[user:like:item] for link prediction: {lp_test_like_node_pairs[:3]}\")\n",
 "np.save(lp_test_like_node_pairs_path, lp_test_like_node_pairs)\n",
 "print(f\"LP test node pairs[user:like:item] are saved to {lp_test_like_node_pairs_path}\\n\")\n",
 "\n",
 "# Test negative dsts for user:like:item.\n",
 "lp_test_like_neg_dsts_path = os.path.join(base_dir, \"lp-test-like-neg-dsts.pt\")\n",
 "lp_test_like_neg_dsts = torch.randint(0, num_nodes, (num_tests, 10))\n",
-"print(f\"Part of test negative dsts[user:like:item] for link prediction: {lp_test_like_neg_dsts[:10]}\")\n",
+"print(f\"Part of test negative dsts[user:like:item] for link prediction: {lp_test_like_neg_dsts[:3]}\")\n",
 "torch.save(lp_test_like_neg_dsts, lp_test_like_neg_dsts_path)\n",
 "print(f\"LP test negative dsts[user:like:item] are saved to {lp_test_like_neg_dsts_path}\\n\")\n",
 "\n",
 "# Test node paris for user:follow:user.\n",
 "lp_test_follow_node_pairs_path = os.path.join(base_dir, \"lp-test-follow-node-pairs.npy\")\n",
 "lp_test_follow_node_pairs = follow_edges[-num_tests, :]\n",
-"print(f\"Part of test node pairs[user:follow:user] for link prediction: {lp_test_follow_node_pairs[:10]}\")\n",
+"print(f\"Part of test node pairs[user:follow:user] for link prediction: {lp_test_follow_node_pairs[:3]}\")\n",
 "np.save(lp_test_follow_node_pairs_path, lp_test_follow_node_pairs)\n",
 "print(f\"LP test node pairs[user:follow:user] are saved to {lp_test_follow_node_pairs_path}\\n\")\n",
 "\n",
 "# Test negative dsts for user:follow:user.\n",
 "lp_test_follow_neg_dsts_path = os.path.join(base_dir, \"lp-test-follow-neg-dsts.pt\")\n",
 "lp_test_follow_neg_dsts = torch.randint(0, num_nodes, (num_tests, 10))\n",
-"print(f\"Part of test negative dsts[user:follow:user] for link prediction: {lp_test_follow_neg_dsts[:10]}\")\n",
+"print(f\"Part of test negative dsts[user:follow:user] for link prediction: {lp_test_follow_neg_dsts[:3]}\")\n",
 "torch.save(lp_test_follow_neg_dsts, lp_test_follow_neg_dsts_path)\n",
 "print(f\"LP test negative dsts[user:follow:user] are saved to {lp_test_follow_neg_dsts_path}\\n\")"
 ],
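
The link-prediction cell shortened above follows the same idea: positive node pairs are sliced from the existing edge list, negative destinations are drawn at random, and only a short preview is printed before saving. A hedged sketch with illustrative stand-ins (base_dir, like_edges, num_vals, and the split sizes mirror but are not the notebook's own variables):

import os
import numpy as np
import torch

base_dir = "./synthetic_data"  # assumed output directory
os.makedirs(base_dir, exist_ok=True)
num_nodes, num_edges = 1000, 10000   # illustrative
num_trains, num_vals = 6000, 2000    # illustrative split sizes

# An existing edge list, as produced earlier in the notebook.
like_edges = np.random.randint(0, num_nodes, size=(num_edges, 2))

# Positive validation pairs: a slice of the real edges, previewed briefly.
val_pairs = like_edges[num_trains:num_trains + num_vals, :]
print(f"Part of val node pairs: {val_pairs[:3]}")
np.save(os.path.join(base_dir, "lp-val-like-node-pairs.npy"), val_pairs)

# Negative destinations: 10 random candidate nodes per validation pair.
val_neg_dsts = torch.randint(0, num_nodes, (num_vals, 10))
print(f"Part of val negative dsts: {val_neg_dsts[:3]}")
torch.save(val_neg_dsts, os.path.join(base_dir, "lp-val-like-neg-dsts.pt"))
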
... (second notebook file in this commit; file name collapsed in this view)
@@ -122,7 +122,7 @@
 "edges_path = os.path.join(base_dir, \"edges.csv\")\n",
 "edges = np.random.randint(0, num_nodes, size=(num_edges, 2))\n",
 "\n",
-"print(f\"Part of edges: {edges[:10, :]}\")\n",
+"print(f\"Part of edges: {edges[:5, :]}\")\n",
 "\n",
 "df = pd.DataFrame(edges)\n",
 "df.to_csv(edges_path, index=False, header=False)\n",
@@ -151,28 +151,28 @@
 "# Generate node feature in numpy array.\n",
 "node_feat_0_path = os.path.join(base_dir, \"node-feat-0.npy\")\n",
 "node_feat_0 = np.random.rand(num_nodes, 5)\n",
-"print(f\"Part of node feature [feat_0]: {node_feat_0[:10, :]}\")\n",
+"print(f\"Part of node feature [feat_0]: {node_feat_0[:3, :]}\")\n",
 "np.save(node_feat_0_path, node_feat_0)\n",
 "print(f\"Node feature [feat_0] is saved to {node_feat_0_path}\\n\")\n",
 "\n",
 "# Generate another node feature in torch tensor\n",
 "node_feat_1_path = os.path.join(base_dir, \"node-feat-1.pt\")\n",
 "node_feat_1 = torch.rand(num_nodes, 5)\n",
-"print(f\"Part of node feature [feat_1]: {node_feat_1[:10, :]}\")\n",
+"print(f\"Part of node feature [feat_1]: {node_feat_1[:3, :]}\")\n",
 "torch.save(node_feat_1, node_feat_1_path)\n",
 "print(f\"Node feature [feat_1] is saved to {node_feat_1_path}\\n\")\n",
 "\n",
 "# Generate edge feature in numpy array.\n",
 "edge_feat_0_path = os.path.join(base_dir, \"edge-feat-0.npy\")\n",
 "edge_feat_0 = np.random.rand(num_edges, 5)\n",
-"print(f\"Part of edge feature [feat_0]: {edge_feat_0[:10, :]}\")\n",
+"print(f\"Part of edge feature [feat_0]: {edge_feat_0[:3, :]}\")\n",
 "np.save(edge_feat_0_path, edge_feat_0)\n",
 "print(f\"Edge feature [feat_0] is saved to {edge_feat_0_path}\\n\")\n",
 "\n",
 "# Generate another edge feature in torch tensor\n",
 "edge_feat_1_path = os.path.join(base_dir, \"edge-feat-1.pt\")\n",
 "edge_feat_1 = torch.rand(num_edges, 5)\n",
-"print(f\"Part of edge feature [feat_1]: {edge_feat_1[:10, :]}\")\n",
+"print(f\"Part of edge feature [feat_1]: {edge_feat_1[:3, :]}\")\n",
 "torch.save(edge_feat_1, edge_feat_1_path)\n",
 "print(f\"Edge feature [feat_1] is saved to {edge_feat_1_path}\\n\")\n"
 ],
@@ -214,37 +214,37 @@
 "\n",
 "nc_train_ids_path = os.path.join(base_dir, \"nc-train-ids.npy\")\n",
 "nc_train_ids = ids[:num_trains]\n",
-"print(f\"Part of train ids for node classification: {nc_train_ids[:10]}\")\n",
+"print(f\"Part of train ids for node classification: {nc_train_ids[:3]}\")\n",
 "np.save(nc_train_ids_path, nc_train_ids)\n",
 "print(f\"NC train ids are saved to {nc_train_ids_path}\\n\")\n",
 "\n",
 "nc_train_labels_path = os.path.join(base_dir, \"nc-train-labels.pt\")\n",
 "nc_train_labels = torch.randint(0, 10, (num_trains,))\n",
-"print(f\"Part of train labels for node classification: {nc_train_labels[:10]}\")\n",
+"print(f\"Part of train labels for node classification: {nc_train_labels[:3]}\")\n",
 "torch.save(nc_train_labels, nc_train_labels_path)\n",
 "print(f\"NC train labels are saved to {nc_train_labels_path}\\n\")\n",
 "\n",
 "nc_val_ids_path = os.path.join(base_dir, \"nc-val-ids.npy\")\n",
 "nc_val_ids = ids[num_trains:num_trains+num_vals]\n",
-"print(f\"Part of val ids for node classification: {nc_val_ids[:10]}\")\n",
+"print(f\"Part of val ids for node classification: {nc_val_ids[:3]}\")\n",
 "np.save(nc_val_ids_path, nc_val_ids)\n",
 "print(f\"NC val ids are saved to {nc_val_ids_path}\\n\")\n",
 "\n",
 "nc_val_labels_path = os.path.join(base_dir, \"nc-val-labels.pt\")\n",
 "nc_val_labels = torch.randint(0, 10, (num_vals,))\n",
-"print(f\"Part of val labels for node classification: {nc_val_labels[:10]}\")\n",
+"print(f\"Part of val labels for node classification: {nc_val_labels[:3]}\")\n",
 "torch.save(nc_val_labels, nc_val_labels_path)\n",
 "print(f\"NC val labels are saved to {nc_val_labels_path}\\n\")\n",
 "\n",
 "nc_test_ids_path = os.path.join(base_dir, \"nc-test-ids.npy\")\n",
 "nc_test_ids = ids[-num_tests:]\n",
-"print(f\"Part of test ids for node classification: {nc_test_ids[:10]}\")\n",
+"print(f\"Part of test ids for node classification: {nc_test_ids[:3]}\")\n",
 "np.save(nc_test_ids_path, nc_test_ids)\n",
 "print(f\"NC test ids are saved to {nc_test_ids_path}\\n\")\n",
 "\n",
 "nc_test_labels_path = os.path.join(base_dir, \"nc-test-labels.pt\")\n",
 "nc_test_labels = torch.randint(0, 10, (num_tests,))\n",
-"print(f\"Part of test labels for node classification: {nc_test_labels[:10]}\")\n",
+"print(f\"Part of test labels for node classification: {nc_test_labels[:3]}\")\n",
 "torch.save(nc_test_labels, nc_test_labels_path)\n",
 "print(f\"NC test labels are saved to {nc_test_labels_path}\\n\")"
 ],
@@ -273,31 +273,31 @@
 "\n",
 "lp_train_node_pairs_path = os.path.join(base_dir, \"lp-train-node-pairs.npy\")\n",
 "lp_train_node_pairs = edges[:num_trains, :]\n",
-"print(f\"Part of train node pairs for link prediction: {lp_train_node_pairs[:10]}\")\n",
+"print(f\"Part of train node pairs for link prediction: {lp_train_node_pairs[:3]}\")\n",
 "np.save(lp_train_node_pairs_path, lp_train_node_pairs)\n",
 "print(f\"LP train node pairs are saved to {lp_train_node_pairs_path}\\n\")\n",
 "\n",
 "lp_val_node_pairs_path = os.path.join(base_dir, \"lp-val-node-pairs.npy\")\n",
 "lp_val_node_pairs = edges[num_trains:num_trains+num_vals, :]\n",
-"print(f\"Part of val node pairs for link prediction: {lp_val_node_pairs[:10]}\")\n",
+"print(f\"Part of val node pairs for link prediction: {lp_val_node_pairs[:3]}\")\n",
 "np.save(lp_val_node_pairs_path, lp_val_node_pairs)\n",
 "print(f\"LP val node pairs are saved to {lp_val_node_pairs_path}\\n\")\n",
 "\n",
 "lp_val_neg_dsts_path = os.path.join(base_dir, \"lp-val-neg-dsts.pt\")\n",
 "lp_val_neg_dsts = torch.randint(0, num_nodes, (num_vals, 10))\n",
-"print(f\"Part of val negative dsts for link prediction: {lp_val_neg_dsts[:10]}\")\n",
+"print(f\"Part of val negative dsts for link prediction: {lp_val_neg_dsts[:3]}\")\n",
 "torch.save(lp_val_neg_dsts, lp_val_neg_dsts_path)\n",
 "print(f\"LP val negative dsts are saved to {lp_val_neg_dsts_path}\\n\")\n",
 "\n",
 "lp_test_node_pairs_path = os.path.join(base_dir, \"lp-test-node-pairs.npy\")\n",
 "lp_test_node_pairs = edges[-num_tests, :]\n",
-"print(f\"Part of test node pairs for link prediction: {lp_test_node_pairs[:10]}\")\n",
+"print(f\"Part of test node pairs for link prediction: {lp_test_node_pairs[:3]}\")\n",
 "np.save(lp_test_node_pairs_path, lp_test_node_pairs)\n",
 "print(f\"LP test node pairs are saved to {lp_test_node_pairs_path}\\n\")\n",
 "\n",
 "lp_test_neg_dsts_path = os.path.join(base_dir, \"lp-test-neg-dsts.pt\")\n",
 "lp_test_neg_dsts = torch.randint(0, num_nodes, (num_tests, 10))\n",
-"print(f\"Part of test negative dsts for link prediction: {lp_test_neg_dsts[:10]}\")\n",
+"print(f\"Part of test negative dsts for link prediction: {lp_test_neg_dsts[:3]}\")\n",
 "torch.save(lp_test_neg_dsts, lp_test_neg_dsts_path)\n",
 "print(f\"LP test negative dsts are saved to {lp_test_neg_dsts_path}\\n\")"
 ],