Unverified commit 266b21e5 authored by Jinjing Zhou, committed by GitHub

[DGL-Go] Change name to dglgo (#3778)



* add

* remove

* fix

* rework the readme and some changes

* add png

* update png

* add recipe get
Co-authored-by: Minjie Wang <wmjlyjemaine@gmail.com>
Co-authored-by: Quan (Andy) Gan <coin2028@hotmail.com>
parent d41d07d0
@@ -31,4 +31,5 @@ general_pipeline:
     name: Adam
     lr: 0.005
   loss: BCELoss
+  save_path: "model.pth"
   num_runs: 1 # Number of experiments to run
@@ -31,4 +31,5 @@ general_pipeline:
     lr: 0.005
     weight_decay: 0.0
   loss: CrossEntropyLoss
+  save_path: "model.pth"
   num_runs: 5
@@ -35,4 +35,5 @@ general_pipeline:
     lr: 0.005
     weight_decay: 0.0
   loss: CrossEntropyLoss
+  save_path: "model.pth"
   num_runs: 5 # Number of experiments to run
@@ -28,4 +28,5 @@ general_pipeline:
     lr: 0.005
     weight_decay: 0.0005
   loss: CrossEntropyLoss
+  save_path: "model.pth"
   num_runs: 10 # Number of experiments to run
@@ -24,4 +24,5 @@ general_pipeline:
     lr: 0.01
     weight_decay: 0.0005
   loss: CrossEntropyLoss
+  save_path: "model.pth"
   num_runs: 10 # Number of experiments to run
@@ -23,4 +23,5 @@ general_pipeline:
     lr: 0.01
     weight_decay: 0.0005
   loss: CrossEntropyLoss
+  save_path: "model.pth"
   num_runs: 10 # Number of experiments to run
@@ -28,4 +28,5 @@ general_pipeline:
     lr: 0.005
     weight_decay: 0.0005
   loss: CrossEntropyLoss
+  save_path: "model.pth"
   num_runs: 10 # Number of experiments to run
@@ -24,4 +24,5 @@ general_pipeline:
     lr: 0.01
     weight_decay: 0.0005
   loss: CrossEntropyLoss
+  save_path: "model.pth"
   num_runs: 10 # Number of experiments to run
@@ -23,4 +23,5 @@ general_pipeline:
     lr: 0.01
     weight_decay: 0.0005
   loss: CrossEntropyLoss
+  save_path: "model.pth"
   num_runs: 10 # Number of experiments to run
@@ -28,4 +28,5 @@ general_pipeline:
     lr: 0.005
     weight_decay: 0.001
   loss: CrossEntropyLoss
+  save_path: "model.pth"
   num_runs: 10 # Number of experiments to run
@@ -24,4 +24,5 @@ general_pipeline:
     lr: 0.01
     weight_decay: 0.0005
   loss: CrossEntropyLoss
+  save_path: "model.pth"
   num_runs: 10 # Number of experiments to run
@@ -23,4 +23,5 @@ general_pipeline:
     lr: 0.01
     weight_decay: 0.0005
   loss: CrossEntropyLoss
+  save_path: "model.pth"
   num_runs: 10 # Number of experiments to run
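All twelve recipe hunks above add the same save_path key under general_pipeline. As a rough sketch of how a pipeline could honor it (an assumption for illustration, not DGL-Go's actual trainer code), the value names the file that receives the trained weights:

import torch
import yaml

# Hypothetical illustration: persist the trained model where the recipe points.
# The recipe path below is illustrative.
with open("my_recipes/nodepred_cora_sage.yaml") as f:
    cfg = yaml.safe_load(f)

model = torch.nn.Linear(4, 2)  # stand-in for the real GNN; training loop elided
torch.save(model.state_dict(), cfg["general_pipeline"]["save_path"])  # -> model.pth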
@@ -3,7 +3,7 @@
 from setuptools import find_packages
 from distutils.core import setup

-setup(name='dglenter',
+setup(name='dglgo',
       version='0.0.1',
       description='DGL',
       author='DGL Team',
@@ -15,12 +15,15 @@ setup(name='dglenter',
           'autopep8>=1.6.0',
           'numpydoc>=1.1.0',
           "pydantic>=1.9.0",
-          "ruamel.yaml>=0.17.20"
+          "ruamel.yaml>=0.17.20",
+          "PyYAML>=5.1"
       ],
+      package_data={"": ["./*"]},
+      include_package_data=True,
       license='APACHE',
       entry_points={
           'console_scripts': [
-              "dgl-enter = dglenter.cli.cli:main"
+              "dgl = dglgo.cli.cli:main"
           ]
       },
       url='https://github.com/dmlc/dgl',
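The entry-point change is the user-visible half of the rename: after installation the command is dgl rather than dgl-enter, dispatching to dglgo.cli.cli:main. A minimal sketch of how such a console script resolves at runtime, assuming Python 3.10+ for the importlib.metadata keyword API:

from importlib.metadata import entry_points

# Look up the "dgl" console script registered by the setup() call above
# and load the callable it points at (dglgo.cli.cli:main).
(dgl_ep,) = [ep for ep in entry_points(group="console_scripts") if ep.name == "dgl"]
main = dgl_ep.load()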
version: 0.0.1
pipeline_name: nodepred
device: cpu
data:
  name: cora
  split_ratio: # Ratio to generate split masks, e.g. [0.8, 0.1, 0.1] for 80% train/10% val/10% test. Leave blank to use the builtin split of the original dataset
model:
  name: sage
  embed_size: -1 # Dimension of the created embedding table. -1 means using the original node embedding
  hidden_size: 16 # Hidden size
  num_layers: 1 # Number of hidden layers
  activation: relu # Activation function name under torch.nn.functional
  dropout: 0.5 # Dropout rate
  aggregator_type: gcn # Aggregator type to use (``mean``, ``gcn``, ``pool``, ``lstm``)
general_pipeline:
  early_stop:
    patience: 20 # Steps before early stop
    checkpoint_path: checkpoint.pth # Early-stop checkpoint model file path
  num_epochs: 200 # Number of training epochs
  eval_period: 5 # Number of epochs between evaluations
  optimizer:
    name: Adam
    lr: 0.01
    weight_decay: 0.0005
  loss: CrossEntropyLoss
  num_runs: 1 # Number of experiments to run
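This recipe is the kind of file dgl config writes and dgl train consumes (see the tests below). A minimal sketch of reading it back with PyYAML, which this commit adds to the dependencies; the path mirrors the one used in test_train:

import yaml  # PyYAML, newly listed in setup.py above

with open("/tmp/test.yaml") as f:
    cfg = yaml.safe_load(f)

assert cfg["pipeline_name"] == "nodepred" and cfg["model"]["name"] == "sage"
opt = cfg["general_pipeline"]["optimizer"]
print(opt["name"], opt["lr"], opt["weight_decay"])  # Adam 0.01 0.0005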
# Run only the recipe regression test, verbosely, dropping into pdb on failure
# and teeing captured output to the terminal.
python -m pytest --pdb -vv --capture=tee-sys test_pipeline.py::test_recipe
\ No newline at end of file
import subprocess
from pathlib import Path
from typing import NamedTuple

import pytest

# Per-dataset settings (currently unused; kept for future timeout tuning).
dataset_spec = {
    "cora": {"timeout": 30}
}

class ExperimentSpec(NamedTuple):
    pipeline: str
    dataset: str
    model: str
    timeout: float
    extra_cfg: dict = {}

exps = [ExperimentSpec(pipeline="nodepred", dataset="cora", model="sage", timeout=0.5)]

@pytest.mark.parametrize("spec", exps)
def test_train(spec):
    cfg_path = "/tmp/test.yaml"
    # Generate a config file with `dgl config`, then train from it.
    run = subprocess.run(
        ["dgl", "config", spec.pipeline, "--data", spec.dataset,
         "--model", spec.model, "--cfg", cfg_path],
        timeout=spec.timeout, capture_output=True)
    assert run.stderr is None or len(run.stderr) == 0, \
        "Found error message: {}".format(run.stderr)
    print(run.stdout.decode("utf-8"))
    run = subprocess.run(["dgl", "train", "--cfg", cfg_path],
                         timeout=spec.timeout, capture_output=True)
    assert run.stderr is None or len(run.stderr) == 0, \
        "Found error message: {}".format(run.stderr)
    print(run.stdout.decode("utf-8"))

TEST_RECIPE_FOLDER = "my_recipes"

@pytest.fixture
def setup_recipe_folder():
    # Copy the builtin recipes into a local folder before testing them.
    subprocess.run(["dgl", "recipe", "copy", "--dir", TEST_RECIPE_FOLDER],
                   timeout=15, capture_output=True)

@pytest.mark.parametrize("file", [str(f) for f in Path(TEST_RECIPE_FOLDER).glob("*.yaml")])
def test_recipe(file, setup_recipe_folder):
    print("DGL enter train {}".format(file))
    try:
        # A short timeout is enough: we only check that training starts cleanly.
        run = subprocess.run(["dgl", "train", "--cfg", file],
                             timeout=5, capture_output=True)
        sh_stdout, sh_stderr = run.stdout, run.stderr
    except subprocess.TimeoutExpired as e:
        sh_stdout = e.stdout
        sh_stderr = e.stderr
    if sh_stderr is not None and len(sh_stderr) != 0:
        # Tolerate warnings, progress bars, and timeout aborts; fail on anything else.
        error_str = sh_stderr.decode("utf-8")
        for line in error_str.split("\n"):
            line = line.strip()
            if line.startswith("WARNING") or line.startswith("Aborted") or line.startswith("0%"):
                continue
            assert len(line) == 0, error_str
    print("{} stdout: {}".format(file, sh_stdout))
    print("{} stderr: {}".format(file, sh_stderr))

# test_recipe( , None)
\ No newline at end of file
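One pytest subtlety in test_recipe: parametrize arguments are evaluated at collection time, so Path(TEST_RECIPE_FOLDER).glob("*.yaml") runs before the setup_recipe_folder fixture has copied anything, and on a fresh checkout the test collects zero cases. A hedged sketch of a variant that defers the glob until after the copy (an illustrative refactor, not part of this commit, and stricter than the original's stderr filter):

import subprocess
from pathlib import Path

import pytest

TEST_RECIPE_FOLDER = "my_recipes"

@pytest.fixture(scope="session")
def recipe_files():
    # Copy first, then glob, so the file list cannot be computed too early.
    subprocess.run(["dgl", "recipe", "copy", "--dir", TEST_RECIPE_FOLDER],
                   timeout=15, capture_output=True)
    return sorted(Path(TEST_RECIPE_FOLDER).glob("*.yaml"))

def test_recipes_start_cleanly(recipe_files):
    assert recipe_files, "recipe copy produced no YAML files"
    for f in recipe_files:
        try:
            run = subprocess.run(["dgl", "train", "--cfg", str(f)],
                                 timeout=5, capture_output=True)
            stderr = run.stderr
        except subprocess.TimeoutExpired:  # training started; good enough
            stderr = None
        assert not stderr, "Found error message: {}".format(stderr)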