"sgl-kernel/python/vscode:/vscode.git/clone" did not exist on "e9f8e423189070d3b223457303ddce8ccb9ce1e7"
Unverified Commit 704bcaf6 authored by Hongzhi (Steve), Chen, committed by GitHub
parent 6bc82161
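Note: the hunks below all make the same kind of change. dgl imports are moved out of a trailing group and sorted alphabetically into the third-party import block, wrapped import lists are reflowed into parenthesized form with trailing commas, and names inside from-imports are reordered case-insensitively (for example, normalize before OneHotEncoder). The lint configuration that produced this is not shown in the diff; the snippet below is only a minimal sketch, assuming isort with its black profile and order_by_type=False, of how the same reordering could be reproduced. Both the tool choice and the settings are assumptions, not taken from this commit.

# Sketch only: assumed tool (isort) and assumed settings; the lint config is not part of this diff.
import isort

before = """from sklearn.preprocessing import (OneHotEncoder,
                                    normalize)
import torch
import dgl
"""

after = isort.code(
    before,
    config=isort.Config(
        profile="black",      # parenthesized multi-line imports with trailing commas
        order_by_type=False,  # case-insensitive name order, e.g. normalize before OneHotEncoder
    ),
)
print(after)
# Expected shape of the result (with all three packages treated as third party):
#   import dgl
#   import torch
#   from sklearn.preprocessing import normalize, OneHotEncoder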
@@ -7,10 +7,10 @@ import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
-from model import ARMA4NC
-from tqdm import trange
from dgl.data import CiteseerGraphDataset, CoraGraphDataset, PubmedGraphDataset
+from model import ARMA4NC
+from tqdm import trange
def main(args):
......
import math
+import dgl.function as fn
import torch
import torch.nn as nn
import torch.nn.functional as F
-import dgl.function as fn
def glorot(tensor):
if tensor is not None:
......
@@ -188,7 +188,6 @@ class BGNNPredictor:
    def init_optimizer(
        self, node_features, optimize_node_features, learning_rate
    ):
        params = [self.model.parameters()]
        if optimize_node_features:
            params.append([node_features])
......
@@ -7,15 +7,17 @@ import torch
import torch.nn.functional as F
from BGNN import BGNNPredictor
from category_encoders import CatBoostEncoder
-from sklearn import preprocessing
-from torch.nn import ELU, Dropout, Linear, ReLU, Sequential
from dgl.data.utils import load_graphs
-from dgl.nn.pytorch import AGNNConv as AGNNConvDGL
-from dgl.nn.pytorch import APPNPConv
-from dgl.nn.pytorch import ChebConv as ChebConvDGL
-from dgl.nn.pytorch import GATConv as GATConvDGL
-from dgl.nn.pytorch import GraphConv
+from dgl.nn.pytorch import (
+    AGNNConv as AGNNConvDGL,
+    APPNPConv,
+    ChebConv as ChebConvDGL,
+    GATConv as GATConvDGL,
+    GraphConv,
+)
+from sklearn import preprocessing
+from torch.nn import Dropout, ELU, Linear, ReLU, Sequential
class GNNModelDGL(torch.nn.Module):
......
@@ -2,10 +2,9 @@ import numpy as np
import torch
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
-from sklearn.model_selection import (GridSearchCV, ShuffleSplit,
-                                     train_test_split)
+from sklearn.model_selection import GridSearchCV, ShuffleSplit, train_test_split
from sklearn.multiclass import OneVsRestClassifier
-from sklearn.preprocessing import OneHotEncoder, normalize
+from sklearn.preprocessing import normalize, OneHotEncoder
def fit_logistic_regression(X, y, data_random_seed=1, repeat=1):
......
@@ -2,20 +2,27 @@ import copy
import os
import warnings
+import dgl
import numpy as np
import torch
-from eval_function import (fit_logistic_regression,
-                           fit_logistic_regression_preset_splits,
-                           fit_ppi_linear)
-from model import (BGRL, GCN, GraphSAGE_GCN, MLP_Predictor,
-                   compute_representations)
+from eval_function import (
+    fit_logistic_regression,
+    fit_logistic_regression_preset_splits,
+    fit_ppi_linear,
+)
+from model import (
+    BGRL,
+    compute_representations,
+    GCN,
+    GraphSAGE_GCN,
+    MLP_Predictor,
+)
from torch.nn.functional import cosine_similarity
from torch.optim import AdamW
from tqdm import tqdm
from utils import CosineDecayScheduler, get_dataset, get_graph_drop_transform
-import dgl
warnings.filterwarnings("ignore")
......
import copy
+import dgl
import torch
+from dgl.nn.pytorch.conv import GraphConv, SAGEConv
from torch import nn
from torch.nn import BatchNorm1d, Parameter
from torch.nn.init import ones_, zeros_
-import dgl
-from dgl.nn.pytorch.conv import GraphConv, SAGEConv
class LayerNorm(nn.Module):
    def __init__(self, in_channels, eps=1e-5, affine=True):
......
@@ -3,9 +3,14 @@ import copy
import numpy as np
import torch
-from dgl.data import (AmazonCoBuyComputerDataset, AmazonCoBuyPhotoDataset,
-                      CoauthorCSDataset, CoauthorPhysicsDataset, PPIDataset,
-                      WikiCSDataset)
+from dgl.data import (
+    AmazonCoBuyComputerDataset,
+    AmazonCoBuyPhotoDataset,
+    CoauthorCSDataset,
+    CoauthorPhysicsDataset,
+    PPIDataset,
+    WikiCSDataset,
+)
from dgl.dataloading import GraphDataLoader
from dgl.transforms import Compose, DropEdge, FeatMask, RowFeatNormalizer
......
+import dgl
+import dgl.function as fn
import torch
from DGLRoutingLayer import DGLRoutingLayer
from torch import nn
from torch.nn import functional as F
-import dgl
-import dgl.function as fn
class DGLDigitCapsuleLayer(nn.Module):
    def __init__(
......
+import dgl
import torch as th
import torch.nn as nn
import torch.nn.functional as F
-import dgl
class DGLRoutingLayer(nn.Module):
    def __init__(self, in_nodes, out_nodes, f_size, batch_size=0, device="cpu"):
......
+import dgl
import torch as th
import torch.nn as nn
from DGLRoutingLayer import DGLRoutingLayer
from torch.nn import functional as F
-import dgl
g = dgl.DGLGraph()
g.graph_data = {}
......
import argparse
+import dgl
import torch as th
import torch.optim as optim
from model import CAREGNN
@@ -7,8 +9,6 @@ from sklearn.metrics import recall_score, roc_auc_score
from torch.nn.functional import softmax
from utils import EarlyStopping
-import dgl
def main(args):
    # Step 1: Prepare graph data and retrieve train/validation/test index ============================= #
......
import argparse
+import dgl
import torch as th
import torch.optim as optim
-from model_sampling import CAREGNN, CARESampler, _l1_dist
+from model_sampling import _l1_dist, CAREGNN, CARESampler
from sklearn.metrics import recall_score, roc_auc_score
from torch.nn.functional import softmax
from utils import EarlyStopping
-import dgl
def evaluate(model, loss_fn, dataloader, device="cpu"):
    loss = 0
......
+import dgl.function as fn
import numpy as np
import torch as th
import torch.nn as nn
-import dgl.function as fn
class CAREConv(nn.Module):
"""One layer of CARE-GNN."""
......
+import dgl
+import dgl.function as fn
import numpy as np
import torch as th
import torch.nn as nn
-import dgl
-import dgl.function as fn
def _l1_dist(edges):
    # formula 2
......
import time
+import dgl
+import dgl.nn as dglnn
import numpy as np
import torch
import torch.nn as nn
@@ -7,9 +10,6 @@ import torch.nn.functional as F
import torchmetrics.functional as MF
from ogb.nodeproppred import DglNodePropPredDataset
-import dgl
-import dgl.nn as dglnn
class SAGE(nn.Module):
    def __init__(self, in_feats, n_hidden, n_classes):
......
from collections import defaultdict as ddict
+import dgl
import numpy as np
import torch
from ordered_set import OrderedSet
from torch.utils.data import DataLoader, Dataset
-import dgl
class TrainDataset(Dataset):
"""
......
import argparse
from time import time
+import dgl.function as fn
import numpy as np
import torch as th
import torch.nn as nn
@@ -10,8 +12,6 @@ from data_loader import Data
from models import CompGCN_ConvE
from utils import in_out_norm
-import dgl.function as fn
# predict the tail for (head, rel, -1) or head for (-1, rel, tail)
def predict(model, graph, device, data_iter, split="valid", mode="tail"):
@@ -96,7 +96,6 @@ def evaluate(model, graph, device, data_iter, split="valid"):
def main(args):
    # Step 1: Prepare graph data and retrieve train/validation/test index ============================= #
    # check cuda
    if args.gpu >= 0 and th.cuda.is_available():
......
+import dgl
+import dgl.function as fn
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from utils import ccorr
-import dgl
-import dgl.function as fn
class CompGraphConv(nn.Module):
"""One layer of CompGCN."""
@@ -41,7 +40,6 @@ class CompGraphConv(nn.Module):
        nn.init.xavier_normal_(self.loop_rel)

    def forward(self, g, n_in_feats, r_feats):
        with g.local_scope():
            # Assign values to source nodes. In a homogeneous graph, this is equal to
            # assigning them to all nodes.
......
@@ -2,9 +2,8 @@
# <https://github.com/malllabiisc/CompGCN/blob/master/helper.py>.
# It implements the operation of circular convolution in the ccorr function and an additional in_out_norm function for norm computation.
-import torch as th
import dgl
+import torch as th
def com_mult(a, b):
......