"src/git@developer.sourcefind.cn:OpenDAS/dgl.git" did not exist on "0114f4fd79ce8552533b063b5a75ac9c2a3f9b54"
Unverified commit 334e6434, authored by Mufei Li and committed by GitHub

[DGL-Go] CI for DGL-Go (#3959)

* Update

* Update

* Fix

* Update

* CI

* Update

* Update

* Update

* Update

* Update

* Update

* Update

* Update

* Update

* Update

* Update

* Update
parent 44dba197
@@ -94,6 +94,14 @@ def tutorial_test_linux(backend) {
    }
}

+def go_test_linux() {
+    init_git()
+    unpack_lib('dgl-cpu-linux', dgl_linux_libs)
+    timeout(time: 20, unit: 'MINUTES') {
+        sh "bash tests/scripts/task_go_test.sh"
+    }
+}
+
def is_authorized(name) {
    def authorized_user = ['VoVAllen', 'BarclayII', 'jermainewang', 'zheng-da', 'mufeili', 'Rhett-Ying', 'isratnisa']
    return (name in authorized_user)
@@ -177,7 +185,7 @@ pipeline {
            agent {
                docker {
                    label "linux-cpu-node"
                    image "dgllib/dgl-ci-lint"
                    alwaysPull true
                }
            }
@@ -191,14 +199,14 @@ pipeline {
                }
            }
        }
        stage('Build') {
            parallel {
                stage('CPU Build') {
                    agent {
                        docker {
                            label "linux-cpu-node"
                            image "dgllib/dgl-ci-cpu:cu101_v220217"
                            args "-u root"
                            alwaysPull true
                        }
@@ -216,7 +224,7 @@ pipeline {
                    agent {
                        docker {
                            label "linux-cpu-node"
                            image "dgllib/dgl-ci-gpu:cu101_v220217"
                            args "-u root"
                            alwaysPull true
                        }
@@ -253,7 +261,7 @@ pipeline {
                    agent {
                        docker {
                            label "linux-cpu-node"
                            image "dgllib/dgl-ci-cpu:cu101_v220217"
                            alwaysPull true
                        }
                    }
@@ -270,7 +278,7 @@ pipeline {
                    agent {
                        docker {
                            label "linux-gpu-node"
                            image "dgllib/dgl-ci-gpu:cu101_v220217"
                            args "--runtime nvidia"
                            alwaysPull true
                        }
@@ -299,7 +307,7 @@ pipeline {
                    agent {
                        docker {
                            label "linux-cpu-node"
                            image "dgllib/dgl-ci-cpu:cu101_v220217"
                            alwaysPull true
                        }
                    }
@@ -320,7 +328,7 @@ pipeline {
                    agent {
                        docker {
                            label "linux-gpu-node"
                            image "dgllib/dgl-ci-gpu:cu101_v220217"
                            args "--runtime nvidia"
                            alwaysPull true
                        }
@@ -342,7 +350,7 @@ pipeline {
                    agent {
                        docker {
                            label "linux-cpu-node"
                            image "dgllib/dgl-ci-cpu:cu101_v220217"
                            args "--shm-size=4gb"
                            alwaysPull true
                        }
@@ -363,6 +371,11 @@ pipeline {
                        tutorial_test_linux('pytorch')
                    }
                }
+                stage('DGL-Go CPU test') {
+                    steps {
+                        go_test_linux()
+                    }
+                }
            }
            post {
                always {
@@ -394,7 +407,7 @@ pipeline {
                    agent {
                        docker {
                            label "linux-gpu-node"
                            image "dgllib/dgl-ci-gpu:cu101_v220217"
                            args "--runtime nvidia --shm-size=8gb"
                            alwaysPull true
                        }
@@ -422,7 +435,7 @@ pipeline {
                    agent {
                        docker {
                            label "linux-cpu-node"
                            image "dgllib/dgl-ci-cpu:cu101_v220217"
                            alwaysPull true
                        }
                    }
@@ -448,7 +461,7 @@ pipeline {
                    agent {
                        docker {
                            label "linux-gpu-node"
                            image "dgllib/dgl-ci-gpu:cu101_v220217"
                            args "--runtime nvidia"
                            alwaysPull true
                        }
......
from typing import List
import torch
import torch.nn as nn
-import dgl.function as fn
import torch.nn.functional as F
from dgl.nn import GATConv
from dgl.base import dgl_warning
@@ -39,7 +38,7 @@ class GAT(nn.Module):
        Dropout rate for features.
    attn_drop : float
        Dropout rate for attentions.
-    negative_slope: float
+    negative_slope : float
        Negative slope for leaky relu in GATConv
    residual : bool
        If true, the GATConv will use residule connection
@@ -61,17 +60,16 @@ class GAT(nn.Module):
            in_hidden = hidden_size*heads[i-1] if i > 0 else in_size
            out_hidden = hidden_size if i < num_layers - \
                1 else data_info["out_size"]
-            use_residual = i == num_layers
-            activation = None if i == num_layers else self.activation
+            activation = None if i == num_layers - 1 else self.activation
            self.gat_layers.append(GATConv(
                in_hidden, out_hidden, heads[i],
-                feat_drop, attn_drop, negative_slope, use_residual, activation))
+                feat_drop, attn_drop, negative_slope, residual, activation))

    def forward(self, graph, node_feat, edge_feat=None):
        if self.embed_size > 0:
            dgl_warning(
-                "The embedding for node feature is used, and input node_feat is ignored, due to the provided embed_size.", norepeat=True)
+                "The embedding for node feature is used, and input node_feat is ignored, due to the provided embed_size.")
            h = self.embed.weight
        else:
            h = node_feat
......
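Why the GAT constructor change matters: in the construction loop, i iterates over range(num_layers), so the old test i == num_layers was never true. As a result the final layer never dropped its activation, and use_residual (also never true) silently overrode the user's residual flag. A minimal standalone sketch of the corrected last-layer logic (illustrative only, not part of the commit):

num_layers = 2
for i in range(num_layers):
    # only the final layer (i == num_layers - 1) drops its activation
    activation = None if i == num_layers - 1 else "relu"
    print(i, activation)  # prints: 0 relu, then 1 None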
@@ -44,7 +44,7 @@ class GCN(nn.Module):
            in_size = embed_size
        else:
            in_size = data_info["in_size"]
        for i in range(num_layers):
            in_hidden = hidden_size if i > 0 else in_size
            out_hidden = hidden_size if i < num_layers - 1 else data_info["out_size"]
@@ -56,7 +56,7 @@ class GCN(nn.Module):
    def forward(self, g, node_feat, edge_feat = None):
        if self.embed_size > 0:
-            dgl_warning("The embedding for node feature is used, and input node_feat is ignored, due to the provided embed_size.", norepeat=True)
+            dgl_warning("The embedding for node feature is used, and input node_feat is ignored, due to the provided embed_size.")
            h = self.embed.weight
        else:
            h = node_feat
......
@@ -44,12 +44,12 @@ class GIN(nn.Module):
                nn.Linear(hidden_size, hidden_size), nn.ReLU())
            self.conv_list.append(GINConv(mlp, aggregator_type, 1e-5, True))
-        self.out_mlp = nn.Linear(hidden_size, self.out_size)
+        self.out_mlp = nn.Linear(hidden_size, data_info["out_size"])

    def forward(self, graph, node_feat, edge_feat=None):
        if self.embed_size > 0:
            dgl_warning(
-                "The embedding for node feature is used, and input node_feat is ignored, due to the provided embed_size.", norepeat=True)
+                "The embedding for node feature is used, and input node_feat is ignored, due to the provided embed_size.")
            h = self.embed.weight
        else:
            h = node_feat
......
@@ -10,7 +10,7 @@ class GraphSAGE(nn.Module):
                 num_layers: int = 1,
                 activation: str = "relu",
                 dropout: float = 0.5,
                 aggregator_type: str = "gcn"):
        """GraphSAGE model

        Parameters
@@ -41,7 +41,7 @@ class GraphSAGE(nn.Module):
        self.layers = nn.ModuleList()
        self.dropout = nn.Dropout(dropout)
        self.activation = getattr(nn.functional, activation)
        for i in range(num_layers):
            in_hidden = hidden_size if i > 0 else in_size
            out_hidden = hidden_size if i < num_layers - 1 else data_info["out_size"]
@@ -49,7 +49,7 @@ class GraphSAGE(nn.Module):
    def forward(self, graph, node_feat, edge_feat = None):
        if self.embed_size > 0:
-            dgl_warning("The embedding for node feature is used, and input node_feat is ignored, due to the provided embed_size.", norepeat=True)
+            dgl_warning("The embedding for node feature is used, and input node_feat is ignored, due to the provided embed_size.")
            h = self.embed.weight
        else:
            h = node_feat
@@ -60,7 +60,7 @@ class GraphSAGE(nn.Module):
            h = self.activation(h)
            h = self.dropout(h)
        return h

    def forward_block(self, blocks, node_feat, edge_feat = None):
        h = node_feat
        for l, (layer, block) in enumerate(zip(self.layers, blocks)):
......
-import torch
import torch.nn as nn
import dgl.function as fn
import torch.nn.functional as F
@@ -30,16 +28,18 @@ class SGC(nn.Module):
        super().__init__()
        self.data_info = data_info
        self.out_size = data_info["out_size"]
-        self.in_size = data_info["in_size"]
        self.embed_size = embed_size
        if embed_size > 0:
            self.embed = nn.Embedding(data_info["num_nodes"], embed_size)
-        self.sgc = SGConv(self.in_size, self.out_size, k=k, cached=True,
+            in_size = embed_size
+        else:
+            in_size = data_info["in_size"]
+        self.sgc = SGConv(in_size, self.out_size, k=k, cached=True,
                          bias=bias, norm=self.normalize)

    def forward(self, g, node_feat, edge_feat=None):
        if self.embed_size > 0:
-            dgl_warning("The embedding for node feature is used, and input node_feat is ignored, due to the provided embed_size.", norepeat=True)
+            dgl_warning("The embedding for node feature is used, and input node_feat is ignored, due to the provided embed_size.")
            h = self.embed.weight
        else:
            h = node_feat
......
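The SGC change ties SGConv's input size to whether a learnable node embedding is in use: with embed_size > 0 the embedding weight becomes the input, otherwise the raw node features are. A minimal sketch of that selection, using a hypothetical helper name purely for illustration:

def pick_in_size(data_info, embed_size):
    # hypothetical helper mirroring the branch above: a positive embed_size
    # means SGConv consumes the learned embedding instead of raw features
    return embed_size if embed_size > 0 else data_info["in_size"]

assert pick_in_size({"in_size": 16}, 8) == 8    # embedding path
assert pick_in_size({"in_size": 16}, -1) == 16  # raw-feature path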
import pytest
import torch
from dglgo.model import *
from test_utils.graph_cases import get_cases

@pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature']))
def test_gcn(g):
    data_info = {
        'num_nodes': g.num_nodes(),
        'out_size': 7
    }
    node_feat = None
    edge_feat = g.edata['scalar_w']
    # node embedding + not use_edge_weight
    model = GCN(data_info, embed_size=10, use_edge_weight=False)
    model(g, node_feat)
    # node embedding + use_edge_weight
    model = GCN(data_info, embed_size=10, use_edge_weight=True)
    model(g, node_feat, edge_feat)

    data_info['in_size'] = g.ndata['h'].shape[-1]
    node_feat = g.ndata['h']
    # node feat + not use_edge_weight
    model = GCN(data_info, embed_size=-1, use_edge_weight=False)
    model(g, node_feat)
    # node feat + use_edge_weight
    model = GCN(data_info, embed_size=-1, use_edge_weight=True)
    model(g, node_feat, edge_feat)

@pytest.mark.parametrize('g', get_cases(['block-bipartite']))
def test_gcn_block(g):
    data_info = {
        'in_size': 10,
        'out_size': 7
    }
    blocks = [g]
    node_feat = torch.randn(g.num_src_nodes(), data_info['in_size'])
    edge_feat = torch.abs(torch.randn(g.num_edges()))
    # not use_edge_weight
    model = GCN(data_info, use_edge_weight=False)
    model.forward_block(blocks, node_feat)
    # use_edge_weight
    model = GCN(data_info, use_edge_weight=True)
    model.forward_block(blocks, node_feat, edge_feat)

@pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature']))
def test_gat(g):
    data_info = {
        'num_nodes': g.num_nodes(),
        'out_size': 7
    }
    node_feat = None
    # node embedding
    model = GAT(data_info, embed_size=10)
    model(g, node_feat)
    # node feat
    data_info['in_size'] = g.ndata['h'].shape[-1]
    node_feat = g.ndata['h']
    model = GAT(data_info, embed_size=-1)
    model(g, node_feat)

@pytest.mark.parametrize('g', get_cases(['block-bipartite']))
def test_gat_block(g):
    data_info = {
        'in_size': 10,
        'out_size': 7
    }
    blocks = [g]
    node_feat = torch.randn(g.num_src_nodes(), data_info['in_size'])
    model = GAT(data_info, num_layers=1, heads=[8])
    model.forward_block(blocks, node_feat)

@pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature']))
def test_gin(g):
    data_info = {
        'num_nodes': g.num_nodes(),
        'out_size': 7
    }
    node_feat = None
    # node embedding
    model = GIN(data_info, embed_size=10)
    model(g, node_feat)
    # node feat
    data_info['in_size'] = g.ndata['h'].shape[-1]
    node_feat = g.ndata['h']
    model = GIN(data_info, embed_size=-1)
    model(g, node_feat)

@pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature']))
def test_sage(g):
    data_info = {
        'num_nodes': g.num_nodes(),
        'out_size': 7
    }
    node_feat = None
    edge_feat = g.edata['scalar_w']
    # node embedding
    model = GraphSAGE(data_info, embed_size=10)
    model(g, node_feat)
    model(g, node_feat, edge_feat)
    # node feat
    data_info['in_size'] = g.ndata['h'].shape[-1]
    node_feat = g.ndata['h']
    model = GraphSAGE(data_info, embed_size=-1)
    model(g, node_feat)
    model(g, node_feat, edge_feat)

@pytest.mark.parametrize('g', get_cases(['block-bipartite']))
def test_sage_block(g):
    data_info = {
        'in_size': 10,
        'out_size': 7
    }
    blocks = [g]
    node_feat = torch.randn(g.num_src_nodes(), data_info['in_size'])
    edge_feat = torch.abs(torch.randn(g.num_edges()))
    model = GraphSAGE(data_info, embed_size=-1)
    model.forward_block(blocks, node_feat)
    model.forward_block(blocks, node_feat, edge_feat)

@pytest.mark.parametrize('g', get_cases(['has_scalar_e_feature']))
def test_sgc(g):
    data_info = {
        'num_nodes': g.num_nodes(),
        'out_size': 7
    }
    node_feat = None
    # node embedding
    model = SGC(data_info, embed_size=10)
    model(g, node_feat)
    # node feat
    data_info['in_size'] = g.ndata['h'].shape[-1]
    node_feat = g.ndata['h']
    model = SGC(data_info, embed_size=-1)
    model(g, node_feat)

def test_bilinear():
    data_info = {
        'in_size': 10,
        'out_size': 1
    }
    model = BilinearPredictor(data_info)
    num_pairs = 10
    h_src = torch.randn(num_pairs, data_info['in_size'])
    h_dst = torch.randn(num_pairs, data_info['in_size'])
    model(h_src, h_dst)

def test_ele():
    data_info = {
        'in_size': 10,
        'out_size': 1
    }
    model = ElementWiseProductPredictor(data_info)
    num_pairs = 10
    h_src = torch.randn(num_pairs, data_info['in_size'])
    h_dst = torch.randn(num_pairs, data_info['in_size'])
    model(h_src, h_dst)
......
import os
import pytest

@pytest.mark.parametrize('data', ['cora', 'citeseer', 'pubmed', 'csv', 'reddit',
                                  'co-buy-computer', 'ogbn-arxiv', 'ogbn-products'])
@pytest.mark.parametrize('model', ['gcn', 'gat', 'sage', 'sgc', 'gin'])
def test_nodepred(data, model):
    os.system('dgl configure nodepred --data {} --model {}'.format(data, model))
    assert os.path.exists('nodepred_{}_{}.yaml'.format(data, model))

    custom_config_file = 'custom_{}_{}.yaml'.format(data, model)
    os.system('dgl configure nodepred --data {} --model {} --cfg {}'.format(data, model,
                                                                            custom_config_file))
    assert os.path.exists(custom_config_file)

    custom_script = '_'.join([data, model]) + '.py'
    os.system('dgl export --cfg {} --output {}'.format(custom_config_file, custom_script))
    assert os.path.exists(custom_script)

@pytest.mark.parametrize('data', ['cora', 'citeseer', 'pubmed', 'csv', 'reddit',
                                  'co-buy-computer', 'ogbn-arxiv', 'ogbn-products'])
@pytest.mark.parametrize('model', ['gcn', 'gat', 'sage'])
def test_nodepred_ns(data, model):
    os.system('dgl configure nodepred-ns --data {} --model {}'.format(data, model))
    assert os.path.exists('nodepred-ns_{}_{}.yaml'.format(data, model))

    custom_config_file = 'custom_{}_{}.yaml'.format(data, model)
    os.system('dgl configure nodepred-ns --data {} --model {} --cfg {}'.format(data, model,
                                                                               custom_config_file))
    assert os.path.exists(custom_config_file)

    custom_script = '_'.join([data, model]) + '.py'
    os.system('dgl export --cfg {} --output {}'.format(custom_config_file, custom_script))
    assert os.path.exists(custom_script)

@pytest.mark.parametrize('data', ['cora', 'citeseer', 'pubmed', 'csv', 'reddit',
                                  'co-buy-computer', 'ogbn-arxiv', 'ogbn-products', 'ogbl-collab',
                                  'ogbl-citation2'])
@pytest.mark.parametrize('node_model', ['gcn', 'gat', 'sage', 'sgc', 'gin'])
@pytest.mark.parametrize('edge_model', ['ele', 'bilinear'])
@pytest.mark.parametrize('neg_sampler', ['global', 'persource'])
def test_linkpred(data, node_model, edge_model, neg_sampler):
    custom_config_file = '_'.join([data, node_model, edge_model, neg_sampler]) + '.yaml'
    os.system('dgl configure linkpred --data {} --node-model {} --edge-model {} --neg-sampler {} --cfg {}'.format(
        data, node_model, edge_model, neg_sampler, custom_config_file))
    assert os.path.exists(custom_config_file)

    custom_script = '_'.join([data, node_model, edge_model, neg_sampler]) + '.py'
    os.system('dgl export --cfg {} --output {}'.format(custom_config_file, custom_script))
    assert os.path.exists(custom_script)
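# Stacked @pytest.mark.parametrize decorators combine as a Cartesian product,
# so test_linkpred above is collected once per (data, node_model, edge_model,
# neg_sampler) combination: 10 * 5 * 2 * 2 = 200 parametrizations. A minimal
# illustration of the mechanism (hypothetical demo, not part of this suite):
@pytest.mark.parametrize('a', [1, 2])
@pytest.mark.parametrize('b', ['x', 'y', 'z'])
def test_parametrize_grid_demo(a, b):
    # collected 2 * 3 = 6 times, once per (a, b) pair
    assert a in (1, 2) and b in 'xyz'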
@pytest.mark.parametrize('data', ['cora', 'citeseer', 'pubmed', 'csv', 'reddit',
                                  'co-buy-computer', 'ogbn-arxiv', 'ogbn-products', 'ogbl-collab',
                                  'ogbl-citation2'])
@pytest.mark.parametrize('node_model', ['gcn', 'gat', 'sage', 'sgc', 'gin'])
@pytest.mark.parametrize('edge_model', ['ele', 'bilinear'])
def test_linkpred_default_neg_sampler(data, node_model, edge_model):
    custom_config_file = '_'.join([data, node_model, edge_model]) + '.yaml'
    os.system('dgl configure linkpred --data {} --node-model {} --edge-model {} --cfg {}'.format(
        data, node_model, edge_model, custom_config_file))
    assert os.path.exists(custom_config_file)

@pytest.mark.parametrize('recipe',
                         ['linkpred_cora_sage.yaml',
                          'linkpred_citation2_sage.yaml',
                          'linkpred_collab_sage.yaml',
                          'nodepred_citeseer_gat.yaml',
                          'nodepred_citeseer_gcn.yaml',
                          'nodepred_citeseer_sage.yaml',
                          'nodepred_cora_gat.yaml',
                          'nodepred_cora_gcn.yaml',
                          'nodepred_cora_sage.yaml',
                          'nodepred_pubmed_gat.yaml',
                          'nodepred_pubmed_gcn.yaml',
                          'nodepred_pubmed_sage.yaml',
                          'nodepred-ns_arxiv_gcn.yaml',
                          'nodepred-ns_product_sage.yaml'])
def test_recipe(recipe):
    # Remove all generated yaml files
    current_dir = os.listdir("./")
    for item in current_dir:
        if item.endswith(".yaml"):
            os.remove(item)
    os.system('dgl recipe get {}'.format(recipe))
    assert os.path.exists(recipe)

def test_node_cora():
    os.system('dgl configure nodepred --data cora --model gcn')
    os.system('dgl train --cfg nodepred_cora_gcn.yaml')
    assert os.path.exists('checkpoint.pth')
    assert os.path.exists('model.pth')
@@ -31,7 +31,7 @@ popd
pushd python
for backend in pytorch mxnet tensorflow
do
    conda activate "${backend}-ci"
    rm -rf build *.egg-info dist
    pip uninstall -y dgl
......
#!/bin/bash
. /opt/conda/etc/profile.d/conda.sh

function fail {
    echo FAIL: $@
    exit -1
}

# Run against the PyTorch backend and the libdgl unpacked from the CI build stage.
export DGLBACKEND=pytorch
export DGL_LIBRARY_PATH=${PWD}/build
export PYTHONPATH=tests:${PWD}/python:$PYTHONPATH
export DGL_DOWNLOAD_DIR=${PWD}

conda activate pytorch-ci

# Reinstall dglgo from source.
pushd dglgo
rm -rf build *.egg-info dist
pip uninstall -y dglgo
python3 setup.py install
popd

export LC_ALL=C.UTF-8
export LANG=C.UTF-8

python3 -m pytest -v --junitxml=pytest_go.xml tests/go || fail "go"

export PYTHONUNBUFFERED=1
export OMP_NUM_THREADS=1
export DMLC_LOG_DEBUG=1