Unverified Commit 334e6434 authored by Mufei Li's avatar Mufei Li Committed by GitHub

[DGL-Go] CI for DGL-Go (#3959)

* Update

* Update

* Fix

* Update

* CI

* Update

* Update

* Update

* Update

* Update

* Update

* Update

* Update

* Update

* Update

* Update

* Update
parent 44dba197
......@@ -94,6 +94,14 @@ def tutorial_test_linux(backend) {
   }
 }
 
+def go_test_linux() {
+  init_git()
+  unpack_lib('dgl-cpu-linux', dgl_linux_libs)
+  timeout(time: 20, unit: 'MINUTES') {
+    sh "bash tests/scripts/task_go_test.sh"
+  }
+}
+
 def is_authorized(name) {
   def authorized_user = ['VoVAllen', 'BarclayII', 'jermainewang', 'zheng-da', 'mufeili', 'Rhett-Ying', 'isratnisa']
   return (name in authorized_user)
......@@ -363,6 +371,11 @@ pipeline {
             tutorial_test_linux('pytorch')
           }
         }
+        stage('DGL-Go CPU test') {
+          steps {
+            go_test_linux()
+          }
+        }
       }
       post {
         always {
......
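Taken together, the Jenkinsfile changes add a "DGL-Go CPU test" stage that calls go_test_linux(), which unpacks the CPU build artifacts and runs tests/scripts/task_go_test.sh (shown at the end of this diff). A rough local equivalent of that stage, assuming a repository checkout with a CPU build under ./build and a PyTorch environment, would be:

    # Sketch of what the new CI stage executes, run from the repository root.
    export DGLBACKEND=pytorch
    export DGL_LIBRARY_PATH=${PWD}/build
    export PYTHONPATH=tests:${PWD}/python:$PYTHONPATH
    bash tests/scripts/task_go_test.sh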
from typing import List
import torch
import torch.nn as nn
import dgl.function as fn
import torch.nn.functional as F
from dgl.nn import GATConv
from dgl.base import dgl_warning
......@@ -39,7 +38,7 @@ class GAT(nn.Module):
         Dropout rate for features.
     attn_drop : float
         Dropout rate for attentions.
-    negative_slope: float
+    negative_slope : float
         Negative slope for leaky relu in GATConv
     residual : bool
         If true, the GATConv will use residual connection
......@@ -61,17 +60,16 @@ class GAT(nn.Module):
             in_hidden = hidden_size*heads[i-1] if i > 0 else in_size
             out_hidden = hidden_size if i < num_layers - \
                 1 else data_info["out_size"]
-            use_residual = i == num_layers
-            activation = None if i == num_layers else self.activation
+            activation = None if i == num_layers - 1 else self.activation
             self.gat_layers.append(GATConv(
                 in_hidden, out_hidden, heads[i],
-                feat_drop, attn_drop, negative_slope, use_residual, activation))
+                feat_drop, attn_drop, negative_slope, residual, activation))
 
     def forward(self, graph, node_feat, edge_feat=None):
         if self.embed_size > 0:
             dgl_warning(
-                "The embedding for node feature is used, and input node_feat is ignored, due to the provided embed_size.", norepeat=True)
+                "The embedding for node feature is used, and input node_feat is ignored, due to the provided embed_size.")
             h = self.embed.weight
         else:
             h = node_feat
......
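Two fixes land in the GAT constructor above. First, layers are indexed 0 to num_layers - 1, so the old condition i == num_layers never matched and every layer, including the output layer, kept an activation; the corrected i == num_layers - 1 drops the activation on the output layer only. Second, use_residual = i == num_layers was likewise always False, so the residual flag was silently ignored; the constructor now passes residual straight through to GATConv. A tiny standalone illustration of the corrected activation logic (not part of the diff; values are hypothetical):

    # Hypothetical values just to show which layers keep an activation.
    num_layers = 3
    for i in range(num_layers):
        activation = None if i == num_layers - 1 else "elu"
        print(i, activation)  # 0 elu, 1 elu, 2 None -- only the output layer has no activation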
......@@ -56,7 +56,7 @@ class GCN(nn.Module):
     def forward(self, g, node_feat, edge_feat = None):
         if self.embed_size > 0:
-            dgl_warning("The embedding for node feature is used, and input node_feat is ignored, due to the provided embed_size.", norepeat=True)
+            dgl_warning("The embedding for node feature is used, and input node_feat is ignored, due to the provided embed_size.")
             h = self.embed.weight
         else:
             h = node_feat
......
......@@ -44,12 +44,12 @@ class GIN(nn.Module):
                 nn.Linear(hidden_size, hidden_size), nn.ReLU())
             self.conv_list.append(GINConv(mlp, aggregator_type, 1e-5, True))
-        self.out_mlp = nn.Linear(hidden_size, self.out_size)
+        self.out_mlp = nn.Linear(hidden_size, data_info["out_size"])
 
     def forward(self, graph, node_feat, edge_feat=None):
         if self.embed_size > 0:
             dgl_warning(
-                "The embedding for node feature is used, and input node_feat is ignored, due to the provided embed_size.", norepeat=True)
+                "The embedding for node feature is used, and input node_feat is ignored, due to the provided embed_size.")
             h = self.embed.weight
         else:
             h = node_feat
......
......@@ -49,7 +49,7 @@ class GraphSAGE(nn.Module):
def forward(self, graph, node_feat, edge_feat = None):
if self.embed_size > 0:
dgl_warning("The embedding for node feature is used, and input node_feat is ignored, due to the provided embed_size.", norepeat=True)
dgl_warning("The embedding for node feature is used, and input node_feat is ignored, due to the provided embed_size.")
h = self.embed.weight
else:
h = node_feat
......
import torch
import torch.nn as nn
import dgl.function as fn
import torch.nn.functional as F
......@@ -30,16 +28,18 @@ class SGC(nn.Module):
         super().__init__()
         self.data_info = data_info
         self.out_size = data_info["out_size"]
-        self.in_size = data_info["in_size"]
         self.embed_size = embed_size
         if embed_size > 0:
             self.embed = nn.Embedding(data_info["num_nodes"], embed_size)
-        self.sgc = SGConv(self.in_size, self.out_size, k=k, cached=True,
+            in_size = embed_size
+        else:
+            in_size = data_info["in_size"]
+        self.sgc = SGConv(in_size, self.out_size, k=k, cached=True,
                           bias=bias, norm=self.normalize)
 
     def forward(self, g, node_feat, edge_feat=None):
         if self.embed_size > 0:
-            dgl_warning("The embedding for node feature is used, and input node_feat is ignored, due to the provided embed_size.", norepeat=True)
+            dgl_warning("The embedding for node feature is used, and input node_feat is ignored, due to the provided embed_size.")
             h = self.embed.weight
         else:
             h = node_feat
......
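The SGC change addresses a related input-width mismatch: SGConv was always built with data_info["in_size"], even when learned node embeddings of width embed_size were used as input. The constructor now picks the width from whichever feature source is active, equivalent to this condensed sketch of the new logic:

    in_size = embed_size if embed_size > 0 else data_info["in_size"]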
import pytest
import torch

from dglgo.model import *
from test_utils.graph_cases import get_cases

@pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature']))
def test_gcn(g):
    data_info = {
        'num_nodes': g.num_nodes(),
        'out_size': 7
    }
    node_feat = None
    edge_feat = g.edata['scalar_w']

    # node embedding + not use_edge_weight
    model = GCN(data_info, embed_size=10, use_edge_weight=False)
    model(g, node_feat)
    # node embedding + use_edge_weight
    model = GCN(data_info, embed_size=10, use_edge_weight=True)
    model(g, node_feat, edge_feat)

    data_info['in_size'] = g.ndata['h'].shape[-1]
    node_feat = g.ndata['h']
    # node feat + not use_edge_weight
    model = GCN(data_info, embed_size=-1, use_edge_weight=False)
    model(g, node_feat)
    # node feat + use_edge_weight
    model = GCN(data_info, embed_size=-1, use_edge_weight=True)
    model(g, node_feat, edge_feat)

@pytest.mark.parametrize('g', get_cases(['block-bipartite']))
def test_gcn_block(g):
    data_info = {
        'in_size': 10,
        'out_size': 7
    }
    blocks = [g]
    node_feat = torch.randn(g.num_src_nodes(), data_info['in_size'])
    edge_feat = torch.abs(torch.randn(g.num_edges()))

    # not use_edge_weight
    model = GCN(data_info, use_edge_weight=False)
    model.forward_block(blocks, node_feat)
    # use_edge_weight
    model = GCN(data_info, use_edge_weight=True)
    model.forward_block(blocks, node_feat, edge_feat)

@pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature']))
def test_gat(g):
    data_info = {
        'num_nodes': g.num_nodes(),
        'out_size': 7
    }
    node_feat = None

    # node embedding
    model = GAT(data_info, embed_size=10)
    model(g, node_feat)

    # node feat
    data_info['in_size'] = g.ndata['h'].shape[-1]
    node_feat = g.ndata['h']
    model = GAT(data_info, embed_size=-1)
    model(g, node_feat)

@pytest.mark.parametrize('g', get_cases(['block-bipartite']))
def test_gat_block(g):
    data_info = {
        'in_size': 10,
        'out_size': 7
    }
    blocks = [g]
    node_feat = torch.randn(g.num_src_nodes(), data_info['in_size'])
    model = GAT(data_info, num_layers=1, heads=[8])
    model.forward_block(blocks, node_feat)

@pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature']))
def test_gin(g):
    data_info = {
        'num_nodes': g.num_nodes(),
        'out_size': 7
    }
    node_feat = None

    # node embedding
    model = GIN(data_info, embed_size=10)
    model(g, node_feat)

    # node feat
    data_info['in_size'] = g.ndata['h'].shape[-1]
    node_feat = g.ndata['h']
    model = GIN(data_info, embed_size=-1)
    model(g, node_feat)

@pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature']))
def test_sage(g):
    data_info = {
        'num_nodes': g.num_nodes(),
        'out_size': 7
    }
    node_feat = None
    edge_feat = g.edata['scalar_w']

    # node embedding
    model = GraphSAGE(data_info, embed_size=10)
    model(g, node_feat)
    model(g, node_feat, edge_feat)

    # node feat
    data_info['in_size'] = g.ndata['h'].shape[-1]
    node_feat = g.ndata['h']
    model = GraphSAGE(data_info, embed_size=-1)
    model(g, node_feat)
    model(g, node_feat, edge_feat)

@pytest.mark.parametrize('g', get_cases(['block-bipartite']))
def test_sage_block(g):
    data_info = {
        'in_size': 10,
        'out_size': 7
    }
    blocks = [g]
    node_feat = torch.randn(g.num_src_nodes(), data_info['in_size'])
    edge_feat = torch.abs(torch.randn(g.num_edges()))
    model = GraphSAGE(data_info, embed_size=-1)
    model.forward_block(blocks, node_feat)
    model.forward_block(blocks, node_feat, edge_feat)

@pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature']))
def test_sgc(g):
    data_info = {
        'num_nodes': g.num_nodes(),
        'out_size': 7
    }
    node_feat = None

    # node embedding
    model = SGC(data_info, embed_size=10)
    model(g, node_feat)

    # node feat
    data_info['in_size'] = g.ndata['h'].shape[-1]
    node_feat = g.ndata['h']
    model = SGC(data_info, embed_size=-1)
    model(g, node_feat)

def test_bilinear():
    data_info = {
        'in_size': 10,
        'out_size': 1
    }
    model = BilinearPredictor(data_info)
    num_pairs = 10
    h_src = torch.randn(num_pairs, data_info['in_size'])
    h_dst = torch.randn(num_pairs, data_info['in_size'])
    model(h_src, h_dst)

def test_ele():
    data_info = {
        'in_size': 10,
        'out_size': 1
    }
    model = ElementWiseProductPredictor(data_info)
    num_pairs = 10
    h_src = torch.randn(num_pairs, data_info['in_size'])
    h_dst = torch.randn(num_pairs, data_info['in_size'])
    model(h_src, h_dst)
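The model tests above (and the pipeline tests that follow) are what the new CI script collects under tests/go. They import get_cases from test_utils.graph_cases, a helper that lives under tests/, which is why the script puts tests on PYTHONPATH. Assuming DGL and dglgo are importable, they can also be run directly:

    # Same invocation the CI script uses, minus the JUnit report.
    PYTHONPATH=tests:${PWD}/python:$PYTHONPATH python3 -m pytest -v tests/go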
import os
import pytest

@pytest.mark.parametrize('data', ['cora', 'citeseer', 'pubmed', 'csv', 'reddit',
                                  'co-buy-computer', 'ogbn-arxiv', 'ogbn-products'])
@pytest.mark.parametrize('model', ['gcn', 'gat', 'sage', 'sgc', 'gin'])
def test_nodepred(data, model):
    os.system('dgl configure nodepred --data {} --model {}'.format(data, model))
    assert os.path.exists('nodepred_{}_{}.yaml'.format(data, model))

    custom_config_file = 'custom_{}_{}.yaml'.format(data, model)
    os.system('dgl configure nodepred --data {} --model {} --cfg {}'.format(data, model,
                                                                            custom_config_file))
    assert os.path.exists(custom_config_file)

    custom_script = '_'.join([data, model]) + '.py'
    os.system('dgl export --cfg {} --output {}'.format(custom_config_file, custom_script))
    assert os.path.exists(custom_script)

@pytest.mark.parametrize('data', ['cora', 'citeseer', 'pubmed', 'csv', 'reddit',
                                  'co-buy-computer', 'ogbn-arxiv', 'ogbn-products'])
@pytest.mark.parametrize('model', ['gcn', 'gat', 'sage'])
def test_nodepred_ns(data, model):
    os.system('dgl configure nodepred-ns --data {} --model {}'.format(data, model))
    assert os.path.exists('nodepred-ns_{}_{}.yaml'.format(data, model))

    custom_config_file = 'custom_{}_{}.yaml'.format(data, model)
    os.system('dgl configure nodepred-ns --data {} --model {} --cfg {}'.format(data, model,
                                                                               custom_config_file))
    assert os.path.exists(custom_config_file)

    custom_script = '_'.join([data, model]) + '.py'
    os.system('dgl export --cfg {} --output {}'.format(custom_config_file, custom_script))
    assert os.path.exists(custom_script)

@pytest.mark.parametrize('data', ['cora', 'citeseer', 'pubmed', 'csv', 'reddit',
                                  'co-buy-computer', 'ogbn-arxiv', 'ogbn-products', 'ogbl-collab',
                                  'ogbl-citation2'])
@pytest.mark.parametrize('node_model', ['gcn', 'gat', 'sage', 'sgc', 'gin'])
@pytest.mark.parametrize('edge_model', ['ele', 'bilinear'])
@pytest.mark.parametrize('neg_sampler', ['global', 'persource'])
def test_linkpred(data, node_model, edge_model, neg_sampler):
    custom_config_file = '_'.join([data, node_model, edge_model, neg_sampler]) + '.yaml'
    os.system('dgl configure linkpred --data {} --node-model {} --edge-model {} --neg-sampler {} --cfg {}'.format(
        data, node_model, edge_model, neg_sampler, custom_config_file))
    assert os.path.exists(custom_config_file)

    custom_script = '_'.join([data, node_model, edge_model, neg_sampler]) + '.py'
    os.system('dgl export --cfg {} --output {}'.format(custom_config_file, custom_script))
    assert os.path.exists(custom_script)

@pytest.mark.parametrize('data', ['cora', 'citeseer', 'pubmed', 'csv', 'reddit',
                                  'co-buy-computer', 'ogbn-arxiv', 'ogbn-products', 'ogbl-collab',
                                  'ogbl-citation2'])
@pytest.mark.parametrize('node_model', ['gcn', 'gat', 'sage', 'sgc', 'gin'])
@pytest.mark.parametrize('edge_model', ['ele', 'bilinear'])
def test_linkpred_default_neg_sampler(data, node_model, edge_model):
    custom_config_file = '_'.join([data, node_model, edge_model]) + '.yaml'
    os.system('dgl configure linkpred --data {} --node-model {} --edge-model {} --cfg {}'.format(
        data, node_model, edge_model, custom_config_file))
    assert os.path.exists(custom_config_file)

@pytest.mark.parametrize('recipe',
                         ['linkpred_cora_sage.yaml',
                          'linkpred_citation2_sage.yaml',
                          'linkpred_collab_sage.yaml',
                          'nodepred_citeseer_gat.yaml',
                          'nodepred_citeseer_gcn.yaml',
                          'nodepred_citeseer_sage.yaml',
                          'nodepred_cora_gat.yaml',
                          'nodepred_cora_gcn.yaml',
                          'nodepred_cora_sage.yaml',
                          'nodepred_pubmed_gat.yaml',
                          'nodepred_pubmed_gcn.yaml',
                          'nodepred_pubmed_sage.yaml',
                          'nodepred-ns_arxiv_gcn.yaml',
                          'nodepred-ns_product_sage.yaml'])
def test_recipe(recipe):
    # Remove all generated yaml files
    current_dir = os.listdir("./")
    for item in current_dir:
        if item.endswith(".yaml"):
            os.remove(item)
    os.system('dgl recipe get {}'.format(recipe))
    assert os.path.exists(recipe)

def test_node_cora():
    os.system('dgl configure nodepred --data cora --model gcn')
    os.system('dgl train --cfg nodepred_cora_gcn.yaml')
    assert os.path.exists('checkpoint.pth')
    assert os.path.exists('model.pth')
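The pipeline tests above drive the dglgo command-line interface end to end. For reference, the configure / export / train flow they exercise looks like this when run by hand (a sketch using the same commands the tests issue; the exported script name is just an example, the YAML and checkpoint names match the defaults asserted above):

    dgl configure nodepred --data cora --model gcn    # writes nodepred_cora_gcn.yaml
    dgl export --cfg nodepred_cora_gcn.yaml --output nodepred_cora_gcn.py
    dgl train --cfg nodepred_cora_gcn.yaml            # saves checkpoint.pth and model.pth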
#!/bin/bash
. /opt/conda/etc/profile.d/conda.sh

function fail {
    echo FAIL: $@
    exit -1
}

export DGLBACKEND=pytorch
export DGL_LIBRARY_PATH=${PWD}/build
export PYTHONPATH=tests:${PWD}/python:$PYTHONPATH
export DGL_DOWNLOAD_DIR=${PWD}

conda activate pytorch-ci

pushd dglgo
rm -rf build *.egg-info dist
pip uninstall -y dglgo
python3 setup.py install
popd

export LC_ALL=C.UTF-8
export LANG=C.UTF-8

python3 -m pytest -v --junitxml=pytest_go.xml tests/go || fail "go"
export PYTHONUNBUFFERED=1
export OMP_NUM_THREADS=1
export DMLC_LOG_DEBUG=1