"...text-generation-inference.git" did not exist on "dd8691b7c51bf4e5c7431403f50ab992a4eebc6f"
Commit 09663f9a authored by rusty1s

benchmark notebook

parent 32a60c2d
@@ -4,5 +4,6 @@ build/
 dist/
 .cache/
 .eggs/
+ipynb_checkpoints/
 *.egg-info/
 *.so
{
"cells": [
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [],
"source": [
"import time\n",
"\n",
"import torch\n",
"import torch_scatter as ts"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [],
"source": [
"def get_uniform_data(n, type):\n",
" output = type(n).fill_(0)\n",
" index = torch.arange(0, n, out=torch.LongTensor())\n",
" input = type(n).fill_(1)\n",
" return output, index, input\n",
"\n",
"def get_even_data(n, type):\n",
" output = type(n).fill_(0)\n",
" index = torch.LongTensor(n).fill_(0)\n",
" input = type(n).fill_(1)\n",
" return output, index, input\n",
"\n",
"n = 1000000\n",
"num_runs = 100"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [],
"source": [
"types = [torch.FloatTensor, torch.DoubleTensor, torch.ByteTensor, torch.CharTensor,\n",
" torch.ShortTensor, torch.IntTensor, torch.LongTensor]"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {},
"outputs": [],
"source": [
"def benchmark(name, output, index, input):\n",
" func = getattr(ts, name)\n",
" runtimes = []\n",
" for type in types:\n",
" runtime = 0\n",
" for i in range(num_runs):\n",
" t = time.process_time()\n",
" func(output, index, input, 0)\n",
" runtime += time.process_time() - t\n",
" runtime /= num_runs\n",
" runtimes.append(runtime)\n",
" return runtimes\n",
"\n",
"def benchmark_pytorch_scatter_add(output, index, input):\n",
" runtimes = []\n",
" for type in types:\n",
" runtime = 0\n",
" for i in range(num_runs):\n",
" t = time.process_time()\n",
" output.scatter_add_(0, index, input)\n",
" runtime += time.process_time() - t\n",
" runtime /= num_runs\n",
" runtimes.append(runtime)\n",
" return runtimes"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# CPU Benchmark"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Compare to PyTorch `scatter_add_`"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[0.0013608833600000025, 0.0010018577800000373, 0.001011737080000028, 0.0010304318000000912, 0.0010575933199999277, 0.001046791089999779, 0.001026226260000076]\n",
"[0.0024379739199999763, 0.0020916335999999623, 0.0021022859400000016, 0.0020662273700000535, 0.002074936339999951, 0.002058829469999992, 0.002079201120000054]\n"
]
}
],
"source": [
"output, index, input = get_uniform_data(n, type=torch.FloatTensor)\n",
"runtimes = benchmark_pytorch_scatter_add(output, index, input)\n",
"print(runtimes)\n",
"\n",
"output, index, input = get_even_data(n, type=torch.FloatTensor)\n",
"runtimes = benchmark_pytorch_scatter_add(output, index, input)\n",
"print(runtimes)"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[0.00494133797000007, 0.004897300620000032, 0.0045747565699999805, 0.004568737810000112, 0.004554663379999937, 0.004549899970000002, 0.004568819980000019]\n",
"[0.0049159168400001365, 0.004562161230000079, 0.004574221990000033, 0.004564846730000127, 0.004551143499999953, 0.00455917282999998, 0.004575252730000017]\n"
]
}
],
"source": [
"output, index, input = get_uniform_data(n, type=torch.FloatTensor)\n",
"runtimes = benchmark('scatter_add_', output, index, input)\n",
"print(runtimes)\n",
"\n",
"output, index, input = get_even_data(n, type=torch.FloatTensor)\n",
"runtimes = benchmark('scatter_add_', output, index, input)\n",
"print(runtimes)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Compare internally"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# GPU Benchmark"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Compare to PyTorch `scatter_add_`"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Compare internally"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.2"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
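One observation on the benchmark cells above: both `benchmark` and `benchmark_pytorch_scatter_add` loop over `types` but keep reusing the tensors passed in, so all seven reported runtimes time the same `FloatTensor` data. Below is a minimal sketch of a per-dtype variant; `benchmark_per_type` and its `data_fn` argument are hypothetical names not present in the notebook, and the sketch assumes the same `scatter_add_(output, index, input, dim)` call signature the notebook uses.

import time

import torch
import torch_scatter as ts

types = [torch.FloatTensor, torch.DoubleTensor, torch.ByteTensor, torch.CharTensor,
         torch.ShortTensor, torch.IntTensor, torch.LongTensor]
n = 1000000
num_runs = 100

def get_uniform_data(n, tensor_type):
    # Same construction as in the notebook, parameterized over the tensor type.
    output = tensor_type(n).fill_(0)
    index = torch.arange(0, n, out=torch.LongTensor())
    input = tensor_type(n).fill_(1)
    return output, index, input

def benchmark_per_type(name, data_fn):  # hypothetical helper, not in the notebook
    func = getattr(ts, name)
    runtimes = []
    for tensor_type in types:
        # Rebuild output/index/input for every dtype so each timing
        # actually exercises that tensor type.
        output, index, input = data_fn(n, tensor_type)
        runtime = 0
        for _ in range(num_runs):
            t = time.process_time()
            func(output, index, input, 0)
            runtime += time.process_time() - t
        runtimes.append(runtime / num_runs)
    return runtimes

print(benchmark_per_type('scatter_add_', get_uniform_data))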
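The "GPU Benchmark" sections are still empty headings in this version of the notebook. The following is only a sketch of what a GPU timing loop might look like, assuming a CUDA-enabled build of torch_scatter with the same `scatter_add_(output, index, input, dim)` interface; `benchmark_gpu` is a hypothetical name. It measures wall-clock time with explicit synchronization, since CUDA kernels launch asynchronously and `time.process_time()` would not capture the device-side work.

import time

import torch
import torch_scatter as ts

def benchmark_gpu(name, output, index, input, num_runs=100):  # hypothetical helper
    func = getattr(ts, name)
    output, index, input = output.cuda(), index.cuda(), input.cuda()
    func(output, index, input, 0)       # warm-up launch
    torch.cuda.synchronize()
    runtime = 0
    for _ in range(num_runs):
        t = time.perf_counter()
        func(output, index, input, 0)
        torch.cuda.synchronize()        # wait for the kernel before stopping the clock
        runtime += time.perf_counter() - t
    return runtime / num_runs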