"vscode:/vscode.git/clone" did not exist on "144e6e2540dd2cf5b0ba26438f4ff0da0ca2e659"
Unverified Commit be58224f authored by Da Zheng, committed by GitHub

[CI] enable tutorial test in CI. (#631)

* enable tutorial test in CI.

* reduce overhead.
parent 7d619365
@@ -217,11 +217,11 @@ pipeline {
         //    unit_test_linux("pytorch", "cpu")
         //  }
         //}
-        //stage("Tutorial test") {
-        //  steps {
-        //    tutorial_test_linux("mxnet")
-        //  }
-        //}
+        stage("Tutorial test") {
+          steps {
+            tutorial_test_linux("mxnet")
+          }
+        }
       }
     }
     stage("MXNet GPU") {
@@ -243,11 +243,11 @@ pipeline {
         //    unit_test_linux("pytorch", "cpu")
         //  }
         //}
-        //stage("Tutorial test") {
-        //  steps {
-        //    tutorial_test_linux("mxnet")
-        //  }
-        //}
+        stage("Tutorial test") {
+          steps {
+            tutorial_test_linux("mxnet")
+          }
+        }
       }
     }
   }
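The stage enabled above calls tutorial_test_linux("mxnet"), which is defined elsewhere in the Jenkinsfile. As a rough, hedged sketch of what such a step might do, the following Python script runs every tutorial script and fails the CI stage on the first non-zero exit; the tutorials/ glob pattern, the 30-minute timeout, and the run_tutorials() helper are all assumptions for illustration, not the repository's actual implementation.

import glob
import subprocess
import sys

# Hypothetical sketch of a tutorial test step. The "tutorials" glob,
# the timeout, and run_tutorials() are assumptions; the real
# tutorial_test_linux() step is defined in the Jenkinsfile.
def run_tutorials(pattern="tutorials/**/*.py", timeout=1800):
    failed = []
    for script in sorted(glob.glob(pattern, recursive=True)):
        print("running", script)
        result = subprocess.run([sys.executable, script], timeout=timeout)
        if result.returncode != 0:
            failed.append(script)
    return failed

if __name__ == "__main__":
    failures = run_tutorials()
    if failures:
        print("failed tutorials:", failures)
        sys.exit(1)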
...
@@ -250,9 +250,9 @@ class GCNSampling(gluon.Block):
 # dropout probability
 dropout = 0.2
 # batch size
-batch_size = 10000
+batch_size = 1000
 # number of neighbors to sample
-num_neighbors = 8
+num_neighbors = 4
 # number of epochs
 num_epochs = 1
@@ -267,6 +267,7 @@ trainer = gluon.Trainer(model.collect_params(), 'adam',
                         {'learning_rate': 0.03, 'wd': 0})
 for epoch in range(num_epochs):
+    i = 0
     for nf in dgl.contrib.sampling.NeighborSampler(g, batch_size,
                                                    num_neighbors,
                                                    neighbor_type='in',
@@ -291,6 +292,10 @@ for epoch in range(num_epochs):
         # optimization
         trainer.step(batch_size=1)
         print("Epoch[{}]: loss {}".format(epoch, loss.asscalar()))
+        i += 1
+        # We only train the model with 32 mini-batches just for demonstration.
+        if i >= 32:
+            break
 
 ##############################################################################
 # Control Variate
...
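Taken together, the tutorial edits cut the cost of a CI run in three ways: a smaller batch size (1000 instead of 10000), fewer sampled neighbors (4 instead of 8), and an early exit after 32 mini-batches. Below is a minimal, self-contained sketch of the resulting sampling loop; the random graph, and the shuffle=True and num_hops=2 arguments, are assumptions standing in for the tutorial's real dataset and full sampler configuration, and the forward/backward pass is elided.

import numpy as np
import dgl

# Stand-in graph; the tutorial itself loads a real dataset. NeighborSampler
# in this DGL version requires a read-only graph.
num_nodes = 10000
src = np.random.randint(0, num_nodes, 50000)
dst = np.random.randint(0, num_nodes, 50000)
g = dgl.DGLGraph()
g.add_nodes(num_nodes)
g.add_edges(src, dst)
g.readonly()

batch_size = 1000    # reduced from 10000
num_neighbors = 4    # reduced from 8

i = 0
for nf in dgl.contrib.sampling.NeighborSampler(g, batch_size, num_neighbors,
                                               neighbor_type='in',
                                               shuffle=True, num_hops=2):
    # The forward pass, loss, backward pass, and trainer.step() from the
    # tutorial would go here.
    i += 1
    # Stop after 32 mini-batches, exactly as the patched tutorial does.
    if i >= 32:
        break
print("processed", i, "mini-batches")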