Unverified commit d30a69bf authored by VoVAllen, committed by GitHub

[Backend] TF backend (#978)

* tf

* add builtin support

* fix

* pytest

* fix

* fix

* fix some bugs

* fix selecting

* fix todo

* fix test

* fix test fail in tf

* fix

* fix

* fix gather row

* fix gather row

* log backend

* fix gather row

* fix gather row

* fix for pytorch

* fix

* fix

* fix

* fix

* fix

* fix tests

* fix

* fix

* fix

* fix

* fix

* fix

* fix convert

* fix

* fix

* fix

* fix inplace

* add alignment setting

* add debug option

* Revert "add alignment setting"

This reverts commit ec63fb3506ea84fff7d447a1fbdfd1d5d1fb6110.

* tf ci

* fix lint

* fix lint

* add tfdlpack

* fix type

* add env

* fix backend

* fix

* fix tests

* remove one_hot

* remove comment

* remove comment

* fix

* use pip to install all

* fix test

* fix base

* fix

* fix

* add skip

* upgrade cmake

* change version

* change ci

* fix

* fix

* fix

* fix

* fix seg fault

* fix

* fix python version

* fix

* try fix

* fix

* fix

* tf takes longer time in ci

* change py version

* fix

* fix

* fix oom

* change kg env

* change kg env

* Aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaah

* I'm never messing with all these chaotic environments again……

* use pytest

* Change image
parent cf9ba90f
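
A minimal sketch of what the new backend enables, assuming only what the diffs below show (backend selection through the DGLBACKEND environment variable, and tf.Tensor node features); illustrative, not part of the commit:

import os
os.environ['DGLBACKEND'] = 'tensorflow'  # must be set before dgl is first imported

import tensorflow as tf
import dgl

g = dgl.DGLGraph()
g.add_nodes(3)
g.add_edges([0, 1], [1, 2])
g.ndata['h'] = tf.ones((3, 4))  # node features are now plain tf.Tensors
print(g.ndata['h'])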
@@ -25,6 +25,10 @@ endif()
 dgl_option(USE_CUDA "Build with CUDA" OFF)
 dgl_option(USE_OPENMP "Build with OpenMP" ON)
 dgl_option(BUILD_CPP_TEST "Build cpp unittest executables" OFF)
+# Set debug compile option for gdb, only happens when -DCMAKE_BUILD_TYPE=DEBUG
+if (NOT MSVC)
+  set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 -g3 -ggdb")
+endif(NOT MSVC)
 if(USE_CUDA)
   message(STATUS "Build with CUDA support")
@@ -83,7 +87,6 @@ endif(USE_OPENMP)
 # configure minigun
 add_definitions(-DENABLE_PARTIAL_FRONTIER=0) # disable minigun partial frontier compile
 # Source file lists
 file(GLOB DGL_SRC
   src/*.cc
......
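The debug flags above are inert in normal builds; as the comment says, they only take effect when configuring with cmake -DCMAKE_BUILD_TYPE=DEBUG.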
 #!/usr/bin/env groovy
-dgl_linux_libs = "build/libdgl.so, build/runUnitTests, python/dgl/_ffi/_cy3/core.cpython-35m-x86_64-linux-gnu.so"
+dgl_linux_libs = "build/libdgl.so, build/runUnitTests, python/dgl/_ffi/_cy3/core.cpython-36m-x86_64-linux-gnu.so"
 // Currently DGL on Windows is not working with Cython yet
 dgl_win64_libs = "build\\dgl.dll, build\\runUnitTests.exe"
@@ -56,7 +56,7 @@ def cpp_unit_test_win64() {
 def unit_test_linux(backend, dev) {
   init_git()
   unpack_lib("dgl-${dev}-linux", dgl_linux_libs)
-  timeout(time: 5, unit: 'MINUTES') {
+  timeout(time: 10, unit: 'MINUTES') {
     sh "bash tests/scripts/task_unit_test.sh ${backend} ${dev}"
   }
 }
@@ -119,7 +119,7 @@ pipeline {
     stage("Build") {
       parallel {
         stage("CPU Build") {
-          agent { docker { image "dgllib/dgl-ci-cpu" } }
+          agent { docker { image "dgllib/dgl-ci-cpu:conda" } }
           steps {
             build_dgl_linux("cpu")
           }
@@ -132,7 +132,7 @@ pipeline {
         stage("GPU Build") {
           agent {
             docker {
-              image "dgllib/dgl-ci-gpu"
+              image "dgllib/dgl-ci-gpu:conda"
               args "--runtime nvidia"
             }
           }
@@ -165,7 +165,7 @@ pipeline {
     stage("Test") {
       parallel {
         stage("C++ CPU") {
-          agent { docker { image "dgllib/dgl-ci-cpu" } }
+          agent { docker { image "dgllib/dgl-ci-cpu:conda" } }
           steps {
             cpp_unit_test_linux()
           }
@@ -186,8 +186,43 @@ pipeline {
             }
           }
         }
+        stage("Tensorflow CPU") {
+          agent { docker { image "dgllib/dgl-ci-cpu:conda" } }
+          stages {
+            stage("Unit test") {
+              steps {
+                unit_test_linux("tensorflow", "cpu")
+              }
+            }
+          }
+          post {
+            always {
+              cleanWs disableDeferredWipeout: true, deleteDirs: true
+            }
+          }
+        }
+        stage("Tensorflow GPU") {
+          agent {
+            docker {
+              image "dgllib/dgl-ci-gpu:conda"
+              args "--runtime nvidia"
+            }
+          }
+          stages {
+            stage("Unit test") {
+              steps {
+                unit_test_linux("tensorflow", "gpu")
+              }
+            }
+          }
+          post {
+            always {
+              cleanWs disableDeferredWipeout: true, deleteDirs: true
+            }
+          }
+        }
         stage("Torch CPU") {
-          agent { docker { image "dgllib/dgl-ci-cpu" } }
+          agent { docker { image "dgllib/dgl-ci-cpu:conda" } }
           stages {
             stage("Unit test") {
               steps {
@@ -234,7 +269,7 @@ pipeline {
         stage("Torch GPU") {
           agent {
             docker {
-              image "dgllib/dgl-ci-gpu"
+              image "dgllib/dgl-ci-gpu:conda"
               args "--runtime nvidia"
             }
           }
@@ -258,7 +293,7 @@ pipeline {
           }
         }
         stage("MXNet CPU") {
-          agent { docker { image "dgllib/dgl-ci-cpu" } }
+          agent { docker { image "dgllib/dgl-ci-cpu:conda" } }
           stages {
             stage("Unit test") {
               steps {
@@ -280,7 +315,7 @@ pipeline {
         stage("MXNet GPU") {
           agent {
             docker {
-              image "dgllib/dgl-ci-gpu"
+              image "dgllib/dgl-ci-gpu:conda"
               args "--runtime nvidia"
             }
           }
@@ -303,7 +338,7 @@ pipeline {
     stage("App") {
       parallel {
         stage("Knowledge Graph CPU") {
-          agent { docker { image "dgllib/dgl-ci-cpu:torch-1.2.0" } }
+          agent { docker { image "dgllib/dgl-ci-cpu:conda" } }
          stages {
            stage("Torch test") {
              steps {
@@ -325,7 +360,7 @@ pipeline {
         stage("Knowledge Graph GPU") {
           agent {
             docker {
-              image "dgllib/dgl-ci-gpu:torch-1.2.0"
+              image "dgllib/dgl-ci-gpu:conda"
               args "--runtime nvidia"
             }
           }
......
@@ -2,7 +2,6 @@ from dataloader import EvalDataset, TrainDataset
 from dataloader import get_dataset
 import argparse
-import torch.multiprocessing as mp
 import os
 import logging
 import time
@@ -10,9 +9,11 @@ import pickle
 backend = os.environ.get('DGLBACKEND', 'pytorch')
 if backend.lower() == 'mxnet':
+    import multiprocessing as mp
     from train_mxnet import load_model_from_checkpoint
     from train_mxnet import test
 else:
+    import torch.multiprocessing as mp
     from train_pytorch import load_model_from_checkpoint
     from train_pytorch import test
......
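Moving the torch.multiprocessing import under the backend check keeps the MXNet code path importable in the new per-framework conda environments, where torch is no longer installed alongside mxnet.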
@@ -10,22 +10,19 @@ RUN bash /install/ubuntu_install_core.sh
 COPY install/ubuntu_install_build.sh /install/ubuntu_install_build.sh
 RUN bash /install/ubuntu_install_build.sh
-# ANTLR deps
-COPY install/ubuntu_install_java.sh /install/ubuntu_install_java.sh
-RUN bash /install/ubuntu_install_java.sh
-COPY install/ubuntu_install_antlr.sh /install/ubuntu_install_antlr.sh
-RUN bash /install/ubuntu_install_antlr.sh
 # python
-COPY install/ubuntu_install_python.sh /install/ubuntu_install_python.sh
-RUN bash /install/ubuntu_install_python.sh
-COPY install/ubuntu_install_python_package.sh /install/ubuntu_install_python_package.sh
-RUN bash /install/ubuntu_install_python_package.sh
-COPY install/ubuntu_install_torch.sh /install/ubuntu_install_torch.sh
-RUN bash /install/ubuntu_install_torch.sh
-COPY install/ubuntu_install_mxnet_cpu.sh /install/ubuntu_install_mxnet_cpu.sh
-RUN bash /install/ubuntu_install_mxnet_cpu.sh
+COPY install/ubuntu_install_conda.sh /install/ubuntu_install_conda.sh
+RUN bash /install/ubuntu_install_conda.sh
+ENV CONDA_ALWAYS_YES="true"
+COPY install/conda_env/torch_cpu.yml /install/conda_env/torch_cpu.yml
+RUN ["/bin/bash", "-i", "-c", "conda env create -f /install/conda_env/torch_cpu.yml"]
+COPY install/conda_env/tensorflow_cpu.yml /install/conda_env/tensorflow_cpu.yml
+RUN ["/bin/bash", "-i", "-c", "conda env create -f /install/conda_env/tensorflow_cpu.yml"]
+COPY install/conda_env/mxnet_cpu.yml /install/conda_env/mxnet_cpu.yml
+RUN ["/bin/bash", "-i", "-c", "conda env create -f /install/conda_env/mxnet_cpu.yml"]
+ENV CONDA_ALWAYS_YES=
\ No newline at end of file
@@ -10,25 +10,13 @@ RUN bash /install/ubuntu_install_core.sh
 COPY install/ubuntu_install_build.sh /install/ubuntu_install_build.sh
 RUN bash /install/ubuntu_install_build.sh
-# ANTLR deps
-COPY install/ubuntu_install_java.sh /install/ubuntu_install_java.sh
-RUN bash /install/ubuntu_install_java.sh
-COPY install/ubuntu_install_antlr.sh /install/ubuntu_install_antlr.sh
-RUN bash /install/ubuntu_install_antlr.sh
 # python
-COPY install/ubuntu_install_python.sh /install/ubuntu_install_python.sh
-RUN bash /install/ubuntu_install_python.sh
-COPY install/ubuntu_install_python_package.sh /install/ubuntu_install_python_package.sh
-RUN bash /install/ubuntu_install_python_package.sh
-COPY install/ubuntu_install_torch_1.2.0.sh /install/ubuntu_install_torch.sh
-RUN bash /install/ubuntu_install_torch.sh
-COPY install/ubuntu_install_mxnet_cpu.sh /install/ubuntu_install_mxnet_cpu.sh
-RUN bash /install/ubuntu_install_mxnet_cpu.sh
-COPY install/FB15k.zip /data/kg/FB15k.zip
-RUN cd /data/kg && unzip FB15k.zip
+COPY install/ubuntu_install_conda.sh /install/ubuntu_install_conda.sh
+RUN bash /install/ubuntu_install_conda.sh
+ENV CONDA_ALWAYS_YES="true"
+COPY install/conda_env/kg_cpu.yml /install/conda_env/kg_cpu.yml
+RUN ["/bin/bash", "-i", "-c", "conda env create -f /install/conda_env/kg_cpu.yml"]
+ENV CONDA_ALWAYS_YES=
\ No newline at end of file
 # CI docker GPU env
-FROM nvidia/cuda:9.0-cudnn7-devel
-# Base scripts
+FROM nvidia/cuda:10.1-cudnn7-devel-ubuntu16.04
 RUN apt-get update --fix-missing
 COPY install/ubuntu_install_core.sh /install/ubuntu_install_core.sh
@@ -10,25 +9,22 @@ RUN bash /install/ubuntu_install_core.sh
 COPY install/ubuntu_install_build.sh /install/ubuntu_install_build.sh
 RUN bash /install/ubuntu_install_build.sh
-# ANTLR deps
-COPY install/ubuntu_install_java.sh /install/ubuntu_install_java.sh
-RUN bash /install/ubuntu_install_java.sh
-COPY install/ubuntu_install_antlr.sh /install/ubuntu_install_antlr.sh
-RUN bash /install/ubuntu_install_antlr.sh
 # python
-COPY install/ubuntu_install_python.sh /install/ubuntu_install_python.sh
-RUN bash /install/ubuntu_install_python.sh
-COPY install/ubuntu_install_python_package.sh /install/ubuntu_install_python_package.sh
-RUN bash /install/ubuntu_install_python_package.sh
-COPY install/ubuntu_install_torch.sh /install/ubuntu_install_torch.sh
-RUN bash /install/ubuntu_install_torch.sh
-COPY install/ubuntu_install_mxnet_gpu.sh /install/ubuntu_install_mxnet_gpu.sh
-RUN bash /install/ubuntu_install_mxnet_gpu.sh
+COPY install/ubuntu_install_conda.sh /install/ubuntu_install_conda.sh
+RUN bash /install/ubuntu_install_conda.sh
+ENV CONDA_ALWAYS_YES="true"
+COPY install/conda_env/torch_gpu.yml /install/conda_env/torch_gpu.yml
+RUN ["/bin/bash", "-i", "-c", "conda env create -f /install/conda_env/torch_gpu.yml"]
+COPY install/conda_env/tensorflow_gpu.yml /install/conda_env/tensorflow_gpu.yml
+RUN ["/bin/bash", "-i", "-c", "conda env create -f /install/conda_env/tensorflow_gpu.yml"]
+COPY install/conda_env/mxnet_gpu.yml /install/conda_env/mxnet_gpu.yml
+RUN ["/bin/bash", "-i", "-c", "conda env create -f /install/conda_env/mxnet_gpu.yml"]
+ENV CONDA_ALWAYS_YES=
 # Environment variables
 ENV PATH=/usr/local/nvidia/bin:${PATH}
@@ -37,3 +33,5 @@ ENV CPLUS_INCLUDE_PATH=/usr/local/cuda/include:${CPLUS_INCLUDE_PATH}
 ENV C_INCLUDE_PATH=/usr/local/cuda/include:${C_INCLUDE_PATH}
 ENV LIBRARY_PATH=/usr/local/cuda/lib64:/usr/local/nvidia/lib64:${LIBRARY_PATH}
 ENV LD_LIBRARY_PATH=/usr/local/cuda/lib64:/usr/local/nvidia/lib64:${LD_LIBRARY_PATH}
+ENV CUDA_VISIBLE_DEVICES=0
+ENV TF_FORCE_GPU_ALLOW_GROWTH=true
\ No newline at end of file
 # CI docker GPU env
-FROM nvidia/cuda:9.0-cudnn7-devel
-# Base scripts
+FROM nvidia/cuda:10.1-cudnn7-devel-ubuntu16.04
 RUN apt-get update --fix-missing
 COPY install/ubuntu_install_core.sh /install/ubuntu_install_core.sh
@@ -10,25 +9,16 @@ RUN bash /install/ubuntu_install_core.sh
 COPY install/ubuntu_install_build.sh /install/ubuntu_install_build.sh
 RUN bash /install/ubuntu_install_build.sh
-# ANTLR deps
-COPY install/ubuntu_install_java.sh /install/ubuntu_install_java.sh
-RUN bash /install/ubuntu_install_java.sh
-COPY install/ubuntu_install_antlr.sh /install/ubuntu_install_antlr.sh
-RUN bash /install/ubuntu_install_antlr.sh
 # python
-COPY install/ubuntu_install_python.sh /install/ubuntu_install_python.sh
-RUN bash /install/ubuntu_install_python.sh
-COPY install/ubuntu_install_python_package.sh /install/ubuntu_install_python_package.sh
-RUN bash /install/ubuntu_install_python_package.sh
-COPY install/ubuntu_install_torch_1.2.0.sh /install/ubuntu_install_torch.sh
-RUN bash /install/ubuntu_install_torch.sh
-COPY install/ubuntu_install_mxnet_gpu.sh /install/ubuntu_install_mxnet_gpu.sh
-RUN bash /install/ubuntu_install_mxnet_gpu.sh
+COPY install/ubuntu_install_conda.sh /install/ubuntu_install_conda.sh
+RUN bash /install/ubuntu_install_conda.sh
+ENV CONDA_ALWAYS_YES="true"
+COPY install/conda_env/kg_gpu.yml /install/conda_env/kg_gpu.yml
+RUN ["/bin/bash", "-i", "-c", "conda env create -f /install/conda_env/kg_gpu.yml"]
+ENV CONDA_ALWAYS_YES=
 COPY install/FB15k.zip /data/kg/FB15k.zip
 RUN cd /data/kg && unzip FB15k.zip
......
name: kg-ci
dependencies:
- python=3.6.9
- pip
- pip:
- torch
- torchvision
- mxnet
- pytest
- nose
- numpy
- cython
- scipy
- networkx
- matplotlib
- nltk
- requests[security]
- tqdm
\ No newline at end of file
name: kg-ci
dependencies:
- python=3.6.9
- pip
- pip:
- torch
- torchvision
- mxnet-cu101
- pytest
- nose
- numpy
- cython
- scipy
- networkx
- matplotlib
- nltk
- requests[security]
- tqdm
\ No newline at end of file
 name: mxnet-ci
 dependencies:
+- python=3.6.9
 - pip
 - pip:
   - mxnet
+  - pytest
   - nose
   - numpy
   - cython
......
 name: mxnet-ci
 dependencies:
-- cudatoolkit = 9.0
+- python=3.6.9
 - pip
 - pip:
-  - mxnet-cu90
+  - mxnet-cu101
+  - pytest
   - nose
   - numpy
   - cython
......
 name: tensorflow-ci
 dependencies:
+- python=3.6.9
 - pip
 - pip:
-  - tf-nightly == 2.1.0.dev20191125
+  - tensorflow==2.1.0rc1
   - tfdlpack
+  - pytest
   - nose
   - numpy
   - cython
......
 name: tensorflow-ci
 dependencies:
-- cudatoolkit = 10.1
+- python=3.6.9
 - pip
 - pip:
-  - tf-nightly-gpu == 2.1.0.dev20191125
+  - tensorflow-gpu==2.1.0rc1
   - tfdlpack-gpu
+  - pytest
   - nose
   - numpy
   - cython
......
 name: pytorch-ci
-channels:
-- pytorch
 dependencies:
-- python = 3.6
-- pytorch = 1.0.1
+- python=3.6.9
 - pip
-- torchvision
+- pip:
+  - torch
+  - torchvision
+  - pytest
   - nose
   - numpy
   - cython
......
 name: pytorch-ci
-channels:
-- pytorch
 dependencies:
-- python = 3.6
-- pytorch = 1.0.1
-- cudatoolkit = 9.2
+- python=3.6.9
 - pip
-- torchvision
+- pip:
+  - torch
+  - torchvision
+  - pytest
   - nose
   - numpy
   - cython
......
-# install cmake 3.9
-wget https://cmake.org/files/v3.9/cmake-3.9.0.tar.gz
-tar xvf cmake-3.9.0.tar.gz
-cd cmake-3.9.0
-./configure
-make -j4
-make install
-cd ..
+# install cmake 3.15, cmake>=3.12 is required for CUDA 10.1
+version=3.15
+build=5
+mkdir ~/temp
+cd ~/temp
+wget https://cmake.org/files/v$version/cmake-$version.$build-Linux-x86_64.sh
+sudo mkdir /opt/cmake
+sudo sh cmake-$version.$build-Linux-x86_64.sh --prefix=/opt/cmake --skip-license
+sudo ln -s /opt/cmake/bin/cmake /usr/local/bin/cmake
+cd ~
+rm -rf ~/temp
\ No newline at end of file
import argparse
import time
import math
import numpy as np
import networkx as nx
import tensorflow as tf
from dgl import DGLGraph
import dgl.function as fn
from dgl.data import register_data_args, load_data
from tensorflow.keras import layers
class GCNLayer(layers.Layer):
def __init__(self,
g,
in_feats,
out_feats,
activation,
dropout,
bias=True):
super(GCNLayer, self).__init__()
self.g = g
w_init = tf.random_normal_initializer()
self.weight = tf.Variable(initial_value=w_init(shape=(in_feats, out_feats),
dtype='float32'),
trainable=True)
if dropout:
self.dropout = layers.Dropout(rate=dropout)
else:
self.dropout = 0.
if bias:
b_init = tf.zeros_initializer()
self.bias = tf.Variable(initial_value=b_init(shape=(out_feats,),
dtype='float32'),
trainable=True)
else:
self.bias = None
self.activation = activation
def call(self, h):
if self.dropout:
h = self.dropout(h)
self.g.ndata['h'] = tf.matmul(h, self.weight)
self.g.ndata['norm_h'] = self.g.ndata['h'] * self.g.ndata['norm']
self.g.update_all(fn.copy_src('norm_h', 'm'),
fn.sum('m', 'h'))
h = self.g.ndata['h']
if self.bias is not None:
h = h + self.bias
if self.activation:
h = self.activation(h)
return h
class GCN(layers.Layer):
def __init__(self,
g,
in_feats,
n_hidden,
n_classes,
n_layers,
activation,
dropout):
super(GCN, self).__init__()
self.layers = []
# input layer
self.layers.append(
GCNLayer(g, in_feats, n_hidden, activation, dropout))
# hidden layers
for i in range(n_layers - 1):
self.layers.append(
GCNLayer(g, n_hidden, n_hidden, activation, dropout))
# output layer
self.layers.append(GCNLayer(g, n_hidden, n_classes, None, dropout))
def call(self, features):
h = features
for layer in self.layers:
h = layer(h)
return h
def evaluate(model, features, labels, mask):
logits = model(features, training=False)
logits = logits[mask]
labels = labels[mask]
indices = tf.math.argmax(logits, axis=1)
acc = tf.reduce_mean(tf.cast(indices == labels, dtype=tf.float32))
return acc.numpy().item()
def main(args):
# load and preprocess dataset
data = load_data(args)
if args.gpu < 0:
device = "/cpu:0"
else:
device = "/gpu:{}".format(args.gpu)
with tf.device(device):
features = tf.convert_to_tensor(data.features, dtype=tf.float32)
labels = tf.convert_to_tensor(data.labels, dtype=tf.int64)
train_mask = tf.convert_to_tensor(data.train_mask, dtype=tf.bool)
val_mask = tf.convert_to_tensor(data.val_mask, dtype=tf.bool)
test_mask = tf.convert_to_tensor(data.test_mask, dtype=tf.bool)
in_feats = features.shape[1]
n_classes = data.num_labels
n_edges = data.graph.number_of_edges()
print("""----Data statistics------'
#Edges %d
#Classes %d
#Train samples %d
#Val samples %d
#Test samples %d""" %
(n_edges, n_classes,
train_mask.numpy().sum(),
val_mask.numpy().sum(),
test_mask.numpy().sum()))
# graph preprocess and calculate normalization factor
g = data.graph
g.remove_edges_from(nx.selfloop_edges(g))
g = DGLGraph(g)
# add self loop
g.add_edges(g.nodes(), g.nodes())
n_edges = g.number_of_edges()
# normalization
degs = tf.cast(tf.identity(g.in_degrees()), dtype=tf.float32)
norm = tf.math.pow(degs, -0.5)
norm = tf.where(tf.math.is_inf(norm), tf.zeros_like(norm), norm)
g.ndata['norm'] = tf.expand_dims(norm, -1)
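        # 'norm' now holds in_degree**(-0.5) for every node (isolated nodes
        # get 0 rather than inf); it is stored on ndata so the layer's message
        # passing can rescale neighbor features during update_all.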
# create GCN model
model = GCN(g,
in_feats,
args.n_hidden,
n_classes,
args.n_layers,
tf.nn.relu,
args.dropout)
optimizer = tf.keras.optimizers.Adam(
learning_rate=args.lr, decay=args.weight_decay)
loss_fcn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True)
# initialize graph
dur = []
for epoch in range(args.n_epochs):
if epoch >= 3:
t0 = time.time()
# forward
with tf.GradientTape() as tape:
logits = model(features)
loss_value = loss_fcn(labels[train_mask], logits[train_mask])
grads = tape.gradient(loss_value, model.trainable_weights)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
if epoch >= 3:
dur.append(time.time() - t0)
acc = evaluate(model, features, labels, val_mask)
print("Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | "
"ETputs(KTEPS) {:.2f}". format(epoch, np.mean(dur), loss_value.numpy().item(),
acc, n_edges / np.mean(dur) / 1000))
acc = evaluate(model, features, labels, test_mask)
print("Test Accuracy {:.4f}".format(acc))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='GCN')
register_data_args(parser)
parser.add_argument("--dropout", type=float, default=0.5,
help="dropout probability")
parser.add_argument("--gpu", type=int, default=-1,
help="gpu")
parser.add_argument("--lr", type=float, default=1e-2,
help="learning rate")
parser.add_argument("--n-epochs", type=int, default=200,
help="number of training epochs")
parser.add_argument("--n-hidden", type=int, default=16,
help="number of hidden gcn units")
parser.add_argument("--n-layers", type=int, default=1,
help="number of hidden gcn layers")
parser.add_argument("--weight-decay", type=float, default=5e-4,
help="Weight for L2 loss")
args = parser.parse_args()
print(args)
main(args)
import argparse
import time
import math
import numpy as np
import networkx as nx
import tensorflow as tf
from dgl import DGLGraph
import dgl.function as fn
from dgl.data import register_data_args, load_data
from tensorflow.keras import layers
def gcn_msg(edge):
msg = edge.src['h'] * edge.src['norm']
return {'m': msg}
def gcn_reduce(node):
accum = tf.reduce_sum(node.mailbox['m'], 1) * node.data['norm']
return {'h': accum}
class GCNLayer(layers.Layer):
def __init__(self,
g,
in_feats,
out_feats,
activation,
dropout,
bias=True):
super(GCNLayer, self).__init__()
self.g = g
w_init = tf.random_normal_initializer()
self.weight = tf.Variable(initial_value=w_init(shape=(in_feats, out_feats),
dtype='float32'),
trainable=True)
if dropout:
self.dropout = layers.Dropout(rate=dropout)
else:
self.dropout = 0.
if bias:
b_init = tf.zeros_initializer()
self.bias = tf.Variable(initial_value=b_init(shape=(out_feats,),
dtype='float32'),
trainable=True)
else:
self.bias = None
self.activation = activation
def call(self, h):
if self.dropout:
h = self.dropout(h)
self.g.ndata['h'] = tf.matmul(h, self.weight)
self.g.update_all(gcn_msg, gcn_reduce)
h = self.g.ndata['h']
if self.bias is not None:
h = h + self.bias
if self.activation:
h = self.activation(h)
return h
class GCN(layers.Layer):
def __init__(self,
g,
in_feats,
n_hidden,
n_classes,
n_layers,
activation,
dropout):
super(GCN, self).__init__()
self.layers = []
# input layer
self.layers.append(
GCNLayer(g, in_feats, n_hidden, activation, dropout))
# hidden layers
for i in range(n_layers - 1):
self.layers.append(
GCNLayer(g, n_hidden, n_hidden, activation, dropout))
# output layer
self.layers.append(GCNLayer(g, n_hidden, n_classes, None, dropout))
def call(self, features):
h = features
for layer in self.layers:
h = layer(h)
return h
def evaluate(model, features, labels, mask):
logits = model(features, training=False)
logits = logits[mask]
labels = labels[mask]
indices = tf.math.argmax(logits, axis=1)
acc = tf.reduce_mean(tf.cast(indices == labels, dtype=tf.float32))
return acc.numpy().item()
def main(args):
# load and preprocess dataset
data = load_data(args)
if args.gpu < 0:
device = "/cpu:0"
else:
device = "/gpu:{}".format(args.gpu)
with tf.device(device):
features = tf.convert_to_tensor(data.features, dtype=tf.float32)
labels = tf.convert_to_tensor(data.labels, dtype=tf.int64)
train_mask = tf.convert_to_tensor(data.train_mask, dtype=tf.bool)
val_mask = tf.convert_to_tensor(data.val_mask, dtype=tf.bool)
test_mask = tf.convert_to_tensor(data.test_mask, dtype=tf.bool)
in_feats = features.shape[1]
n_classes = data.num_labels
n_edges = data.graph.number_of_edges()
print("""----Data statistics------'
#Edges %d
#Classes %d
#Train samples %d
#Val samples %d
#Test samples %d""" %
(n_edges, n_classes,
train_mask.numpy().sum(),
val_mask.numpy().sum(),
test_mask.numpy().sum()))
# graph preprocess and calculate normalization factor
g = data.graph
g.remove_edges_from(nx.selfloop_edges(g))
g = DGLGraph(g)
# add self loop
g.add_edges(g.nodes(), g.nodes())
n_edges = g.number_of_edges()
# normalization
degs = tf.cast(tf.identity(g.in_degrees()), dtype=tf.float32)
norm = tf.math.pow(degs, -0.5)
norm = tf.where(tf.math.is_inf(norm), tf.zeros_like(norm), norm)
g.ndata['norm'] = tf.expand_dims(norm, -1)
# create GCN model
model = GCN(g,
in_feats,
args.n_hidden,
n_classes,
args.n_layers,
tf.nn.relu,
args.dropout)
optimizer = tf.keras.optimizers.Adam(
learning_rate=args.lr, decay=args.weight_decay)
loss_fcn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True)
# initialize graph
dur = []
for epoch in range(args.n_epochs):
if epoch >= 3:
t0 = time.time()
# forward
with tf.GradientTape() as tape:
logits = model(features)
loss_value = loss_fcn(labels[train_mask], logits[train_mask])
grads = tape.gradient(loss_value, model.trainable_weights)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
if epoch >= 3:
dur.append(time.time() - t0)
acc = evaluate(model, features, labels, val_mask)
print("Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | "
"ETputs(KTEPS) {:.2f}". format(epoch, np.mean(dur), loss_value.numpy().item(),
acc, n_edges / np.mean(dur) / 1000))
acc = evaluate(model, features, labels, test_mask)
print("Test Accuracy {:.4f}".format(acc))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='GCN')
register_data_args(parser)
parser.add_argument("--dropout", type=float, default=0.5,
help="dropout probability")
parser.add_argument("--gpu", type=int, default=-1,
help="gpu")
parser.add_argument("--lr", type=float, default=1e-2,
help="learning rate")
parser.add_argument("--n-epochs", type=int, default=200,
help="number of training epochs")
parser.add_argument("--n-hidden", type=int, default=16,
help="number of hidden gcn units")
parser.add_argument("--n-layers", type=int, default=1,
help="number of hidden gcn layers")
parser.add_argument("--weight-decay", type=float, default=5e-4,
help="Weight for L2 loss")
args = parser.parse_args()
print(args)
main(args)
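
The two GCN examples differ in how aggregation is expressed: the first uses DGL's builtin message and reduce functions (fn.copy_src, fn.sum), which DGL can lower to fused sparse kernels, while this second variant spells the step out as user-defined gcn_msg/gcn_reduce callbacks. Note that the user-defined reduce also rescales the aggregated sum by the destination node's 'norm', whereas the builtin variant as shown scales only on the source side.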
@@ -1178,25 +1178,6 @@ def zerocopy_from_dgl_ndarray(input):
     pass
-def one_hot(t, num_classes=-1):
-    """
-    Convert tensor to one-hot tensor
-
-    Parameters
-    --------------
-    t: tensor
-        class values of any shape.
-    num_classes: int (Default: -1)
-        Total number of classes. If set to -1, the number
-        of classes will be inferred as one greater than the largest class
-        value in the input tensor.
-
-    Returns
-    -------
-    Tensor
-    """
-    pass
 ###############################################################################
 # Custom Operators for graph level computations.
......
@@ -358,10 +358,6 @@ def zerocopy_to_dgl_ndarray_for_write(arr):
 def zerocopy_from_dgl_ndarray(arr):
     return nd.from_dlpack(arr.to_dlpack())
-def one_hot(t, num_classes=-1):
-    if num_classes == -1:
-        num_classes = mx.nd.max(t).asscalar() + 1
-    return mx.nd.one_hot(t, num_classes)
 class BinaryReduce(mx.autograd.Function):
     def __init__(self, reducer, binary_op, graph, lhs, rhs, out_size, lhs_map,
......
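With the one_hot stub dropped from the backend interface (and its MXNet implementation removed above), callers fall back to framework-native ops. A minimal sketch of a TensorFlow equivalent, assuming the same semantics as the removed spec (num_classes=-1 infers one more than the largest class value):

import tensorflow as tf

def one_hot(t, num_classes=-1):
    # Infer the class count when not given, as the removed spec described.
    if num_classes == -1:
        num_classes = int(tf.reduce_max(t)) + 1
    return tf.one_hot(t, depth=num_classes)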