"src/git@developer.sourcefind.cn:renzhc/diffusers_dcu.git" did not exist on "8b975882228595521864f84a66fa0081cc1432d5"
Unverified Commit 1b37545d authored by Jinjing Zhou's avatar Jinjing Zhou Committed by GitHub
Browse files

[Regression] Fix regression test (#1426)

* fix

* fix

* fix

* fix

* fix

* 111

* fix

* fix

* fix

* test

* ff

* fix

* ff

* fix

* f

* merge

* fix
parent 43d9f43a
......@@ -150,3 +150,10 @@ cscope.*
# parameters
*.params
# vscode
.clangd
.vscode
# asv
.asv
\ No newline at end of file
......@@ -8,19 +8,19 @@
"project_url": "https://github.com/dmlc/dgl",
// The URL or local path of the source code repository for the
// project being benchmarked
"repo": "https://github.com/dmlc/dgl.git",
"repo": ".",
// The Python project's subdirectory in your repo. If missing or
// the empty string, the project is assumed to be located at the root
// of the repository.
// "repo_subdir": "",
// "repo_subdir": "python",
// Customizable commands for building, installing, and
// uninstalling the project. See asv.conf.json documentation.
//
"install_command": [
"in-dir={env_dir} python -m pip install numpy"
"/bin/bash {build_dir}/tests/regression/install_dgl_asv.sh"
],
"build_command": [
"python -c \"print('skip')\""
"/bin/bash {build_dir}/tests/regression/build_dgl_asv.sh"
],
"uninstall_command": [
"return-code=any python -mpip uninstall -y dgl"
......@@ -31,7 +31,7 @@
// ],
// List of branches to benchmark. If not provided, defaults to "master"
// (for git) or "default" (for mercurial).
// "branches": ["master"], // for git
"branches": ["master"], // for git
// "branches": ["default"], // for mercurial
// The DVCS being used. If not set, it will be automatically
// determined from "repo" by looking at the protocol in the URL
......@@ -107,7 +107,7 @@
"benchmark_dir": "tests/regression",
// The directory (relative to the current directory) to cache the Python
// environments in. If not provided, defaults to "env"
// "env_dir": "env",
"env_dir": ".asv/env",
// The directory (relative to the current directory) that raw benchmark
// results are stored in. If not provided, defaults to "results".
"results_dir": "asv/results",
......
......@@ -18,12 +18,15 @@ The basic use is execute a script, and get the needed results out of the printed
## Run locally
The default regression branch in asv is `master`. If you need to run on other branch on your fork, please change the `branches` value in the `asv.conf.json` at the root of your repo.
```bash
docker run --name dgl-reg --rm --hostname=reg-machine --runtime=nvidia -dit dgllib/dgl-ci-gpu:conda /bin/bash
docker cp /home/ubuntu/asv_data dgl-reg:/root/asv_data/
docker cp ./asv_data dgl-reg:/root/asv_data/
docker cp ./run.sh dgl-reg:/root/run.sh
docker exec dgl-reg bash /root/run.sh <repo> <branch>
docker cp dgl-reg:/root/regression/dgl/asv/. /home/ubuntu/asv_data/ # Change /home/ubuntu/asv to the path you want to put the result
docker cp dgl-reg:/root/regression/dgl/asv/. ./asv_data/ # Change ./asv_data to the path where you want to put the result
docker stop dgl-reg
```
And in the directory you choose (such as `/home/ubuntu/asv_data`), there's a `html` directory. You can use `python -m http.server` to start a server to see the result
The running result will be at `./asv_data/`. You can use `python -m http.server` inside the `html` folder to start a server to see the result
Empty folder for asv data place holder
\ No newline at end of file
......@@ -16,12 +16,12 @@ class GCNBenchmark:
param_names = ['backend', 'dataset', 'gpu_id']
timeout = 120
# def setup_cache(self):
# self.tmp_dir = Path(tempfile.mkdtemp())
def __init__(self):
self.std_log = {}
def setup(self, backend, dataset, gpu_id):
log_filename = Path("gcn_{}_{}_{}.log".format(backend, dataset, gpu_id))
if log_filename.exists():
key_name = "{}_{}_{}".format(backend, dataset, gpu_id)
if key_name in self.std_log:
return
gcn_path = base_path / "examples/{}/gcn/train.py".format(backend)
bashCommand = "/opt/conda/envs/{}-ci/bin/python {} --dataset {} --gpu {} --n-epochs 50".format(
......@@ -29,12 +29,13 @@ class GCNBenchmark:
process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE,env=dict(os.environ, DGLBACKEND=backend))
output, error = process.communicate()
print(str(error))
log_filename.write_text(str(output))
self.std_log[key_name] = str(output)
def track_gcn_time(self, backend, dataset, gpu_id):
log_filename = Path("{}_{}_{}.log".format(backend, dataset, gpu_id))
lines = log_filename.read_text().split("\\n")
key_name = "{}_{}_{}".format(backend, dataset, gpu_id)
lines = self.std_log[key_name].split("\\n")
time_list = []
for line in lines:
# print(line)
......@@ -45,8 +46,9 @@ class GCNBenchmark:
return np.array(time_list)[-10:].mean()
def track_gcn_accuracy(self, backend, dataset, gpu_id):
log_filename = Path("{}_{}_{}.log".format(backend, dataset, gpu_id))
lines = log_filename.read_text().split("\\n")
key_name = "{}_{}_{}".format(backend, dataset, gpu_id)
lines = self.std_log[key_name].split("\\n")
test_acc = -1
for line in lines:
if 'Test accuracy' in line:
......
......@@ -16,25 +16,25 @@ class SAGEBenchmark:
param_names = ['backend', 'gpu']
timeout = 1800
# def setup_cache(self):
# self.tmp_dir = Path(tempfile.mkdtemp())
def __init__(self):
self.std_log = {}
def setup(self, backend, gpu):
log_filename = Path("sage_sampling_{}_{}.log".format(backend, gpu))
if log_filename.exists():
key_name = "{}_{}".format(backend, gpu)
if key_name in self.std_log:
return
run_path = base_path / "examples/{}/graphsage/train_sampling.py".format(backend)
bashCommand = "/opt/conda/envs/{}-ci/bin/python {} --num-workers=4 --num-epochs=16 --gpu={}".format(
bashCommand = "/opt/conda/envs/{}-ci/bin/python {} --num-workers=2 --num-epochs=16 --gpu={}".format(
backend, run_path.expanduser(), gpu)
process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE,env=dict(os.environ, DGLBACKEND=backend))
output, error = process.communicate()
print(str(error))
log_filename.write_text(str(output))
self.std_log[key_name] = str(output)
def track_sage_time(self, backend):
log_filename = Path("sage_sampling_{}_{}.log".format(backend, gpu))
lines = log_filename.read_text().split("\\n")
def track_sage_time(self, backend, gpu):
key_name = key_name = "{}_{}".format(backend, gpu)
lines = self.std_log[key_name].split("\\n")
time_list = []
for line in lines:
if line.startswith('Epoch Time'):
......@@ -42,9 +42,9 @@ class SAGEBenchmark:
time_list.append(float(time_str))
return np.array(time_list).mean()
def track_sage_accuracy(self, backend):
log_filename = Path("sage_sampling_{}_{}.log".format(backend, gpu))
lines = log_filename.read_text().split("\\n")
def track_sage_accuracy(self, backend, gpu):
key_name = key_name = "{}_{}".format(backend, gpu)
lines = self.std_log[key_name].split("\\n")
test_acc = 0.
for line in lines:
if line.startswith('Eval Acc'):
......
# Build DGL from source with CUDA enabled, for the asv regression benchmarks.
# Assumes it is run from the repository root (asv's {build_dir}).
mkdir build
# Turn on the GPU build; extend CMAKE_VARS to tweak other cmake options.
CMAKE_VARS="-DUSE_CUDA=ON"
# Drop any previously downloaded third-party archives so the build starts clean.
rm -rf _download
pushd build
cmake $CMAKE_VARS ..
make -j4
popd
#!/bin/bash
# Install the DGL Python package into each backend-specific conda environment
# (pytorch / mxnet / tensorflow) used by the asv regression suite.
# Assumes the DGL C library was already built (see build_dgl_asv.sh) and that
# conda lives under /opt/conda with "<backend>-ci" environments pre-created.
set -e
python -m pip install numpy
# Make the `conda activate` shell function available in this non-interactive shell.
. /opt/conda/etc/profile.d/conda.sh
pushd python
for backend in pytorch mxnet tensorflow
do
conda activate "${backend}-ci"
# Remove artifacts from previous builds so setup.py installs cleanly.
rm -rf build *.egg-info dist
pip uninstall -y dgl
# test install
python3 setup.py install
# test inplace build (for cython)
python3 setup.py build_ext --inplace
done
popd
conda deactivate
\ No newline at end of file
......@@ -11,7 +11,10 @@ else
fi
docker run --name dgl-reg --rm --hostname=reg-machine --runtime=nvidia -dit dgllib/dgl-ci-gpu:conda /bin/bash
docker cp /home/ubuntu/asv_data dgl-reg:/root/asv_data/
docker exec dgl-reg bash /root/asv_data/run.sh $REPO $BRANCH
docker cp dgl-reg:/root/regression/dgl/asv/. /home/ubuntu/asv_data/
docker cp ./asv_data dgl-reg:/root/asv_data/
docker cp ./run.sh dgl-reg:/root/run.sh
docker exec dgl-reg bash /root/run.sh $REPO $BRANCH
docker cp dgl-reg:/root/regression/dgl/asv/. ./asv_data/
docker stop dgl-reg
......@@ -16,8 +16,8 @@ mkdir regression
cd regression
# git config core.filemode false
git clone --recursive https://github.com/$REPO/dgl.git
git checkout $BRANCH
cd dgl
git checkout $BRANCH
mkdir asv
cp -r ~/asv_data/* asv/
......@@ -25,7 +25,7 @@ conda activate base
pip install --upgrade pip
pip install asv numpy
source /root/regression/dgl/tests/scripts/build_dgl.sh gpu
export DGL_LIBRARY_PATH="~/dgl/build"
conda activate base
asv machine --yes
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment