"vscode:/vscode.git/clone" did not exist on "9efcac38af58b7247e205c47efe090b4c6ec7574"
Unverified Commit b8aa893b authored by Jiezhong Qiu's avatar Jiezhong Qiu Committed by GitHub
Browse files

Merge pull request #1 from xptree/laekov/multigpu

Faster MoE implementation for both single GPU and multiple GPUs
parents ef83c893 b9c28810
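For orientation, a minimal single-GPU usage sketch of the layer this patch exposes (shapes and gate values are illustrative, not taken from the patch; the multi-GPU path additionally passes world_size and initializes torch.distributed with the MPI backend, as moe_test.py below does):

import torch
from moe import MOELayer

num_expert, in_feat, out_feat, batch_size = 4, 1024, 1024, 16
layer = MOELayer(num_expert, in_feat, out_feat).cuda()
inp = torch.rand(batch_size, in_feat).cuda()
gate = torch.randint(0, num_expert, (batch_size,)).int().cuda()  # one expert id per token
out = layer(inp, gate)  # [batch_size, out_feat]
out.mean().backward()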
#ifndef CUBLAS_WRAPPER_H
#define CUBLAS_WRAPPER_H
#include <cublas_v2.h>
inline cublasStatus_t cublasXgemmBatched(cublasHandle_t handle,
cublasOperation_t transa,
......
#include <unordered_map>
#include <mutex>
#include <cassert>
#include <thread>
#include "cuda_stream_manager.h"
#include <helper_cuda.h>
// thread_local CudaStreamManager smgr;  // single-manager global from the old code path, superseded by the per-device map below
#define SMGR_N_STREAMS 4
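// Each manager owns SMGR_N_STREAMS CUDA streams with one cuBLAS handle bound to
// each; stream(i) and handle(i) map an arbitrary index (e.g. an expert id) onto
// this fixed pool round-robin so that work for different experts can overlap.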
cudaStream_t CudaStreamManager::stream(size_t idx) {
return this->streams[idx % SMGR_N_STREAMS];
}
cublasHandle_t CudaStreamManager::handle(size_t idx) {
return this->handles[idx % SMGR_N_STREAMS];
}
void CudaStreamManager::sync(int idx) {
for (int i = 0; i < idx && i < SMGR_N_STREAMS; ++i) {
cudaStreamSynchronize(streams[i]);
}
}
void CudaStreamManager::setup(const int device) {
checkCudaErrors(cudaSetDevice(device));
streams = new cudaStream_t[SMGR_N_STREAMS];
handles = new cublasHandle_t[SMGR_N_STREAMS];
for (size_t i = 0; i < SMGR_N_STREAMS; ++i) {
checkCudaErrors(cudaStreamCreate(streams + i));
checkCudaErrors(cublasCreate(handles + i));
cublasSetStream(handles[i], streams[i]);
}
#ifdef MOE_USE_NCCL
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
ncclUniqueId uid;
if (rank == 0) {
ncclGetUniqueId(&uid);
}
MPI_Bcast(&uid, sizeof(uid), MPI_BYTE, 0, MPI_COMM_WORLD);
NCCL_SAFE_CALL(ncclCommInitRank(&ncclcomm, size, uid, rank));
#endif
}
/*
CudaStreamManager* getCudaStreamManager(const size_t num_expert, const int device) {
if (!smgr) {
smgr = new CudaStreamManager(num_expert, device);
}
assert(smgr->num_expert == num_expert);
assert(smgr->device == device);
return smgr;
}
*/
void CudaStreamManager::destroy() {
for (size_t i = 0; i < SMGR_N_STREAMS; ++i) {
checkCudaErrors(cudaStreamDestroy(streams[i]));
checkCudaErrors(cublasDestroy(handles[i]));
}
delete[] streams;
delete[] handles;
}
std::unordered_map<int, CudaStreamManager*> smgrs;
std::mutex smgr_mtx;
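// One manager per CUDA device, created lazily. The lookup below uses
// double-checked locking: the map is probed without the mutex first and only
// re-checked under smgr_mtx before a new manager is inserted.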
CudaStreamManager* getCudaStreamManager(const int device) {
auto it = smgrs.find(device);
if (it == smgrs.end()) {
smgr_mtx.lock();
it = smgrs.find(device);
if (it == smgrs.end()) {
auto smgr = new CudaStreamManager(device);
smgrs.insert(std::pair<int, CudaStreamManager*>(device, smgr));
smgr_mtx.unlock();
return smgr;
} else {
smgr_mtx.unlock();
}
}
return it->second;
}
@@ -3,51 +3,48 @@
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <helper_cuda.h>
#include <cstdio>
#ifdef MOE_USE_NCCL
#include <mpi.h>
#include <nccl.h>
#define NCCL_SAFE_CALL(__fn__) { \
auto __res__ = __fn__; \
if (__res__ != ncclSuccess) { \
fprintf(stderr, "NCCL Error at %s:%d value %d\n", __FILE__, __LINE__, __res__); \
exit(-1); \
} \
}
#endif
class CudaStreamManager {
public:
int device;
cublasHandle_t* handles;
cudaStream_t* streams;
#ifdef MOE_USE_NCCL
int rank, size;
ncclComm_t ncclcomm;
#endif
public:
CudaStreamManager(int device_): device(device_) {
this->setup(device);
}
void setup(int);
void sync(int=0);
void destroy();
cudaStream_t stream(size_t=0);
cublasHandle_t handle(size_t=0);
~CudaStreamManager() {
#ifdef MOE_DEBUG
printf("destructor at device %d\n", device);
#endif
this->destroy();
}
};
// CudaStreamManager* getCudaStreamManager(const size_t num_expert, const int device);
CudaStreamManager* getCudaStreamManager(const int device);
#endif // CUDA_STREAM_MANAGER
@@ -4,58 +4,98 @@
#include <iostream>
#include <vector>
std::vector<torch::Tensor> moe_cuda_forward(
torch::Tensor input,
torch::Tensor gate,
torch::Tensor weight);
std::vector<torch::Tensor> moe_cuda_backward(
torch::Tensor grad_output,
torch::Tensor input,
torch::Tensor gate,
torch::Tensor weight);
// C++ interface
#include "moe_cuda_kernel.h"
// NOTE: AT_ASSERT has become AT_CHECK on master after 0.4.
#define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
std::vector<torch::Tensor> moe_expert_count(
torch::Tensor gate,
size_t num_expert) {
CHECK_INPUT(gate);
return moe_cuda_expert_count(gate, num_expert);
}
std::vector<torch::Tensor> moe_local_scatter(
torch::Tensor input,
torch::Tensor pos) {
CHECK_INPUT(input);
return moe_cuda_local_scatter(input, pos);
}
std::vector<torch::Tensor> moe_local_gather(
torch::Tensor output_buf,
torch::Tensor pos) {
CHECK_INPUT(output_buf);
return moe_cuda_local_gather(output_buf, pos);
}
std::vector<torch::Tensor> moe_forward(
torch::Tensor input_buf, // [batch_size x in_feat]
torch::Tensor weight, // [num_expert x out_feat x in_feat]
torch::Tensor expert_count // [num_expert]
) {
CHECK_INPUT(input_buf);
CHECK_INPUT(weight);
/*
The bias term should have been merged into weight. Note the following fact that
Wx + b = [W b] [x]
               [1]
*/
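/*
Hypothetical 1-D illustration of the identity above (numbers made up): with
W = [2 3], b = 5 and x = (x1, x2), we get
Wx + b = 2*x1 + 3*x2 + 5 = [2 3 5] * (x1, x2, 1),
so appending a constant 1 feature to the input lets the bias ride inside the
weight matrix and the kernels only ever perform a plain GEMM.
*/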
return moe_cuda_forward(input_buf, weight, expert_count);
}
std::vector<torch::Tensor> moe_backward(
torch::Tensor grad_output_buf, // [batch_size x out_feat]
torch::Tensor input_buf, // [batch_size x in_feat]
torch::Tensor weight, // [num_expert x out_feat x in_feat]
torch::Tensor expert_count
) {
CHECK_INPUT(grad_output_buf);
CHECK_INPUT(input_buf);
CHECK_INPUT(weight);
/*
The bias term should have been merged into weight. Note the following fact that
Wx + b = [W b] [x]
               [1]
*/
return moe_cuda_backward(grad_output_buf, input_buf, weight, expert_count);
}
#ifdef MOE_USE_NCCL
std::vector<torch::Tensor> moe_expert_exchange(
torch::Tensor local_expert_count,
size_t num_expert, size_t n_workers) {
return moe_cuda_expert_exchange(local_expert_count, num_expert, n_workers);
}
std::vector<torch::Tensor> moe_global_scatter(
torch::Tensor input_buf,
torch::Tensor local_expert_count,
torch::Tensor global_expert_count,
size_t batch_size, size_t n_workers) {
CHECK_INPUT(input_buf);
return moe_cuda_global_scatter(input_buf,
local_expert_count, global_expert_count,
batch_size, n_workers);
}
std::vector<torch::Tensor> moe_global_gather(
torch::Tensor output_buf,
torch::Tensor local_expert_count,
torch::Tensor global_expert_count,
size_t batch_size, size_t n_workers) {
CHECK_INPUT(output_buf);
return moe_cuda_global_gather(output_buf,
local_expert_count, global_expert_count,
batch_size, n_workers);
}
#endif
/*
int main() {
@@ -69,6 +109,14 @@ int main() {
*/
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("expert_count", &moe_expert_count, "MoE expert count (CUDA)");
m.def("local_scatter", &moe_local_scatter, "MoE local scatter (CUDA)");
m.def("local_gather", &moe_local_gather, "MoE local gather (CUDA)");
#ifdef MOE_USE_NCCL
m.def("expert_exchange", &moe_expert_exchange, "MoE expert exchange (CUDA)");
m.def("global_scatter", &moe_global_scatter, "MoE global scatter (CUDA)");
m.def("global_gather", &moe_global_gather, "MoE global gather (CUDA)");
#endif
m.def("forward", &moe_forward, "MoE forward (CUDA)");
m.def("backward", &moe_backward, "MoE backward (CUDA)");
}
import math
from torch import nn
from torch.autograd import Function
import torch
import moe_cuda
class MOEFunction(Function):
@staticmethod
def forward(ctx, inp, gate, weight):
out_feat, in_feat = weight.size()[1:]
weight_column_major = weight.transpose(-1, -2).contiguous().view(-1, out_feat, in_feat)
output = moe_cuda.forward(inp, gate, weight_column_major)
variables = [inp, gate, weight_column_major]
ctx.save_for_backward(*variables)
return output[0]
@staticmethod
def backward(ctx, grad_out):
# print("grad_out", grad_out)
# print("input", ctx.saved_tensors[0])
grad_inp, grad_weight = moe_cuda.backward(
grad_out.contiguous(), *ctx.saved_tensors)
out_feat, in_feat = grad_weight.size()[1:]
# print("grad_weight_column_major", grad_weight.flatten())
grad_weight_row_major = grad_weight.view(-1, in_feat, out_feat).transpose(-1, -2).contiguous().view(-1, out_feat, in_feat)
return grad_inp, None, grad_weight_row_major
from moe_function import moe
class MOELayer(nn.Module):
def __init__(self, num_expert=32, in_feat=1024, out_feat=1024,
world_size=None):
super(MOELayer, self).__init__()
self.num_expert = num_expert
self.in_feat = in_feat
self.out_feat = out_feat
self.world_size = world_size
self.weight = nn.Parameter(
torch.Tensor(num_expert, out_feat, in_feat))
self.reset_parameters()
@@ -45,22 +23,26 @@ class MOELayer(nn.Module):
self.weight.data[i] = linear.weight.data
def forward(self, inp, gate):
return moe(inp, gate.int(), self.weight, self.world_size)
class MOELayer_raw(nn.Module):
def __init__(self, num_expert=32, in_feat=1024, out_feat=1024,
world_size=0):
super(MOELayer_raw, self).__init__()
self.num_expert = num_expert
self.in_feat = in_feat
self.out_feat = out_feat
self.weight = nn.Parameter(
torch.Tensor(num_expert * world_size, out_feat, in_feat))
self.reset_parameters()
def reset_parameters(self):
for i in range(self.num_expert):
linear = nn.Linear(in_features=self.in_feat,
out_features=self.out_feat)
# print(linear.weight.shape)
self.weight.data[i] = linear.weight.data
def forward(self, inp, gate):
@@ -68,75 +50,5 @@ class MOELayer_raw(nn.Module):
batch_size = inp.size(0)
x = inp.new_zeros((batch_size, self.out_feat))
for i in range(batch_size):
x[i] = inp[i] @ self.weight[gate_long[i]].t()
return x
def test():
torch.manual_seed(42)
torch.cuda.manual_seed(42)
batch_size = 4
num_expert = 4
in_feat = 2
out_feat = 3
linear = nn.Linear(in_feat, in_feat).cuda()
moe = MOELayer(num_expert, in_feat, out_feat).cuda()
moe_raw = MOELayer_raw(num_expert, in_feat, out_feat).cuda()
moe_raw.weight.data = moe.weight.data.clone()
inp = torch.rand(batch_size, in_feat).cuda()
gate = torch.randint(low=0, high=num_expert, size=(batch_size, ), requires_grad=False).int().cuda()
linear.zero_grad()
moe.zero_grad()
x = linear(inp)
output = moe(x, gate)
print("moe output", output)
y = output.mean()
y.backward()
print("moe.weight.grad", moe.weight.grad)
print("linear.weight.grad", linear.weight.grad)
print("linear.bias.grad", linear.bias.grad)
linear.zero_grad()
moe.zero_grad()
x = linear(inp.clone())
output_raw= moe_raw(x, gate.clone())
print("moe_raw output", output_raw)
y_raw = output_raw.mean()
y_raw.backward()
print("moe_raw.weight.grad", moe_raw.weight.grad)
print("linear_raw.weight.grad", linear.weight.grad)
print("linear_raw.bias.grad", linear.bias.grad)
def test_dp():
torch.manual_seed(42)
torch.cuda.manual_seed(42)
batch_size = 6
num_expert = 4
in_feat = 2
out_feat = 3
inp = torch.rand(batch_size, in_feat).cuda()
gate = torch.randint(low=0, high=num_expert, size=(batch_size, ), requires_grad=False).int().cuda()
print("data parallel of a nn.Linear model")
linear = nn.Linear(in_feat, in_feat).cuda()
linear_dp = torch.nn.DataParallel(linear, device_ids=[0,1,2])
output = linear_dp(inp)
print("successful!")
print("data parallel of our MoE model")
moe = MOELayer(num_expert, in_feat, out_feat).cuda()
moe_dp = torch.nn.DataParallel(moe, device_ids=[0,1,2])
for i in range(5):
output = moe_dp(inp, gate)
if __name__ == '__main__':
# test()
test_dp()
This diff is collapsed.
#ifndef MOE_CUDA_KERNEL_H
#define MOE_CUDA_KERNEL_H
#include <vector>
#include <torch/extension.h>
#include <torch/torch.h>
std::vector<torch::Tensor> moe_cuda_expert_count(
torch::Tensor gate, size_t num_expert);
std::vector<torch::Tensor> moe_cuda_local_scatter(
torch::Tensor input,
torch::Tensor pos);
std::vector<torch::Tensor> moe_cuda_local_gather(
torch::Tensor output_buf,
torch::Tensor pos);
std::vector<torch::Tensor> moe_cuda_forward(
torch::Tensor input_buf,
torch::Tensor weight,
torch::Tensor expert_count);
std::vector<torch::Tensor> moe_cuda_backward(
torch::Tensor grad_output_buf,
torch::Tensor input_buf,
torch::Tensor weight,
torch::Tensor expert_count);
#ifdef MOE_USE_NCCL
std::vector<torch::Tensor> moe_cuda_global_scatter(
torch::Tensor input_buf,
torch::Tensor local_expert_count,
torch::Tensor global_expert_count,
long batch_size, long n_workers);
std::vector<torch::Tensor> moe_cuda_global_gather(
torch::Tensor output_buf,
torch::Tensor local_expert_count,
torch::Tensor global_expert_count,
long batch_size, long n_workers);
std::vector<torch::Tensor> moe_cuda_expert_exchange(
torch::Tensor local_expert_count,
long num_expert, long n_workers);
#endif
#endif // MOE_CUDA_KERNEL_H
import torch
from torch.autograd import Function
import moe_cuda
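# Single-GPU data path (MOELocal below): expert_count buckets the tokens by the
# expert chosen in `gate` and returns a permutation `pos`; local_scatter reorders
# the input rows into expert-contiguous blocks; forward runs the per-expert GEMMs
# over those blocks; local_gather restores the original row order.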
class MOELocal(Function):
@staticmethod
def forward(ctx, inp, gate, weight):
expert_count, pos = moe_cuda.expert_count(gate, weight.shape[0])
input_buf, = moe_cuda.local_scatter(inp, pos)
output_buf, = moe_cuda.forward(input_buf, weight, expert_count)
output = moe_cuda.local_gather(output_buf, pos)
variables = [input_buf, gate, weight, expert_count, pos]
ctx.save_for_backward(*variables)
return output[0]
@staticmethod
def backward(ctx, grad_out):
input_buf, gate, weight, expert_count, pos = ctx.saved_tensors
grad_out_buf, = moe_cuda.local_scatter(grad_out.contiguous(), pos)
grad_inp_buf, grad_weight = moe_cuda.backward(
grad_out_buf, input_buf, weight, expert_count)
grad_inp, = moe_cuda.local_gather(grad_inp_buf, pos)
return grad_inp, None, grad_weight
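# Multi-GPU data path (MOEGlobal below): each worker counts its local tokens per
# global expert, expert_exchange swaps these counts across workers, global_scatter
# ships every token to the worker that owns its expert, forward runs there on the
# combined batch, and global_gather returns the results to each token's home
# worker; the backward pass routes gradients through the same exchange pattern.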
class MOEGlobal(Function):
@staticmethod
def forward(ctx, inp, gate, weight, world_size):
num_expert = weight.shape[0]
local_expert_count, pos = moe_cuda.expert_count(gate,
world_size * num_expert)
global_expert_count, fwd_expert_count = moe_cuda.expert_exchange(
local_expert_count, num_expert, world_size)
fwd_batch_size = int(fwd_expert_count.sum().item())
local_input_buf, = moe_cuda.local_scatter(inp, pos)
global_input_buf, = moe_cuda.global_scatter(local_input_buf,
local_expert_count, global_expert_count,
fwd_batch_size, world_size)
global_output_buf, = moe_cuda.forward(global_input_buf, weight,
fwd_expert_count)
local_output_buf, = moe_cuda.global_gather(global_output_buf,
local_expert_count, global_expert_count,
inp.shape[0], world_size)
output, = moe_cuda.local_gather(local_output_buf, pos)
variables = (global_input_buf, gate, weight,
local_expert_count, global_expert_count, fwd_expert_count,
pos)
ctx.moe_args = (num_expert, inp.shape[0], fwd_batch_size, world_size)
ctx.save_for_backward(*variables)
return output
@staticmethod
def backward(ctx, grad_out):
(input_buf, gate, weight,
local_expert_count, global_expert_count, fwd_expert_count,
pos) = ctx.saved_tensors
num_expert, local_batch_size, fwd_batch_size, world_size = ctx.moe_args
grad_out_buf, = moe_cuda.local_scatter(grad_out.contiguous(), pos)
global_grad_out_buf, = moe_cuda.global_scatter(grad_out_buf,
local_expert_count, global_expert_count,
fwd_batch_size, world_size)
grad_inp_buf, grad_weight = moe_cuda.backward(
global_grad_out_buf, input_buf, weight, fwd_expert_count)
local_grad_inp_buf, = moe_cuda.global_gather(grad_inp_buf,
local_expert_count, global_expert_count,
local_batch_size, world_size)
grad_inp, = moe_cuda.local_gather(local_grad_inp_buf, pos)
return grad_inp, None, grad_weight, None
def moe(inp, gate, weight, world_size):
if world_size is not None and world_size > 1:
return MOEGlobal.apply(inp, gate, weight, world_size)
else:
return MOELocal.apply(inp, gate, weight)
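The wrapper can also be driven directly; a minimal single-GPU sketch with hypothetical shapes, assuming the moe_cuda extension has been built:

import torch
from moe_function import moe

num_expert, in_feat, out_feat = 4, 16, 32
weight = torch.randn(num_expert, out_feat, in_feat, device='cuda', requires_grad=True)
inp = torch.randn(8, in_feat, device='cuda')
gate = torch.randint(0, num_expert, (8,), device='cuda').int()
out = moe(inp, gate, weight, None)  # world_size=None selects the MOELocal path
out.sum().backward()  # populates weight.grad via moe_cuda.backward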
from moe import MOELayer, MOELayer_raw
import torch
from torch import nn
import time
import sys
dev_name_default = 'cuda:0'
def perf():
torch.manual_seed(42 + torch.distributed.get_rank())
torch.cuda.manual_seed(42 + torch.distributed.get_rank())
if len(sys.argv) == 6:
batch_size = int(sys.argv[2])
in_feat = int(sys.argv[3])
out_feat = int(sys.argv[4])
num_expert = int(sys.argv[5])
else:
batch_size = 4096
in_feat = 1024
out_feat = 4096
num_expert = 4
if torch.distributed.get_rank() == 0:
print('Performance test case bs {} {}x{} ne {}'.format(batch_size,
in_feat, out_feat, num_expert))
if torch.distributed.get_world_size() > 1:
dev_name = 'cuda'
else:
dev_name = dev_name_default
inp = torch.rand(batch_size, in_feat).cuda(dev_name)
gate = torch.randint(low=0,
high=num_expert * torch.distributed.get_world_size(),
size=(batch_size, ), requires_grad=False).int().cuda(dev_name)
inp = torch.rand(batch_size, in_feat).cuda("cuda:1")
gate = torch.randint(low=0, high=num_expert, size=(batch_size, ), requires_grad=False).int().cuda("cuda:1")
moe = MOELayer(num_expert, in_feat, out_feat, world_size).cuda(dev_name)
moe.train()
moe = MOELayer(num_expert, in_feat, out_feat).cuda("cuda:1")
o = moe(inp, gate)
o = moe(inp, gate)
o = moe(inp, gate)
o = moe(inp, gate)
n_runs = 16
tott = 0.
backt = 0.
maxt = 0.
sqtot = 0.
for i in range(n_runs):
gate = torch.randint(low=0,
high=num_expert * torch.distributed.get_world_size(),
size=(batch_size, ), requires_grad=False).int().cuda(dev_name)
ts = time.time()
o = moe(inp, gate)
te = time.time()
loss = o.sum()
bts = time.time()
loss.backward()
bte = time.time()
tott += te - ts
sqtot += (te - ts)**2
maxt = max(maxt, te - ts)
backt += bte - bts
gflops = 2e-9 * n_runs * in_feat * out_feat * batch_size / tott
print('Time mean/max/stdev/back {:.3f} {:.3f} {:.3f} {:.3f} ms, {:.3f} GFLOPs'.format(
tott * 1e3 / n_runs, maxt * 1e3,
((sqtot / n_runs - (tott / n_runs)**2) ** 0.5) * 1e3,
backt * 1e3 / n_runs, gflops))
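# One forward pass is a set of per-expert GEMMs totalling roughly
# 2 * batch_size * in_feat * out_feat FLOPs, which is what the gflops expression
# above measures (forward only; the backward time is reported separately).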
def test_module(moe, linear, inp, gate):
linear.zero_grad()
moe.zero_grad()
x = (linear(inp))
output = moe(x, gate)
# print('ooutput', torch.distributed.get_rank(), output)
y = output.mean()
y.backward()
return output, moe.weight.grad, linear.weight.grad, linear.bias.grad
def test():
torch.manual_seed(42 + torch.distributed.get_rank())
torch.cuda.manual_seed(42 + torch.distributed.get_rank())
batch_size = 4
num_expert = 2
in_feat = 6
out_feat = 7
linear = nn.Linear(in_feat, in_feat).cuda()
if world_size > 1:
moe = MOELayer(num_expert, in_feat, out_feat, world_size).cuda()
else:
moe = MOELayer(num_expert, in_feat, out_feat).cuda()
moe_raw = MOELayer_raw(num_expert, in_feat, out_feat, world_size).cuda()
if world_size == 1:
moe_raw.weight.data = moe.weight.data.clone()
else:
weight_array = [torch.empty_like(moe.weight.data).cpu()
for _ in range(world_size)]
torch.distributed.all_gather(weight_array, moe.weight.data.cpu())
moe_raw.weight.data = torch.cat(weight_array, dim=0).cuda()
inp = torch.rand(batch_size, in_feat).cuda()
gate = torch.randint(low=0,
high=num_expert * world_size,
size=(batch_size,),
requires_grad=False).int().cuda()
# gate = torch.Tensor([0, 1, 0, 1]).int().cuda()
moe_out = test_module(moe, linear, inp.clone(), gate.clone())
raw_out = test_module(moe_raw, linear, inp.clone(), gate.clone())
names = ['Out', 'Moe wei', 'Linear wei', 'Linear bias']
if world_size > 1:
rank = torch.distributed.get_rank()
ou, wg, lwg, lbg = raw_out
wg = wg.cpu()
torch.distributed.all_reduce(wg)
wg = wg[rank * num_expert:(rank + 1)* num_expert]
raw_out = ou, wg.cuda(), lwg, lbg
for name, mo, ro in zip(names, moe_out, raw_out):
err = (mo - ro).abs().sum()
print('{} abs err {}'.format(name, err))
def test_dp():
torch.manual_seed(42)
torch.cuda.manual_seed(42)
batch_size = 6
num_expert = 4
in_feat = 2
out_feat = 3
inp = torch.rand(batch_size, in_feat).cuda()
gate = torch.randint(low=0, high=num_expert, size=(batch_size, ), requires_grad=False).int().cuda()
print("data parallel of a nn.Linear model")
linear = nn.Linear(in_feat, in_feat).cuda()
linear_dp = torch.nn.DataParallel(linear, device_ids=[0,1,2])
output = linear_dp(inp)
print("successful!")
print("data parallel of our MoE model")
moe = MOELayer(num_expert, in_feat, out_feat).cuda()
moe_dp = torch.nn.DataParallel(moe, device_ids=[0,1,2])
for i in range(5):
output = moe_dp(inp, gate)
if __name__ == '__main__':
torch.distributed.init_process_group(backend='mpi')
world_size = torch.distributed.get_world_size()
if len(sys.argv) == 2:
task = sys.argv[1]
print('Specified task {}'.format(task))
if task == 'correctness':
test()
elif task == 'dp':
test_dp()
elif task == 'performance':
perf()
else:
test()
#!/bin/bash
if [ ! -z $OMPI_COMM_WORLD_LOCAL_RANK ]
then
export CUDA_VISIBLE_DEVICES=$OMPI_COMM_WORLD_LOCAL_RANK
fi
export PYTHONPATH=$PWD/build/lib.linux-x86_64-3.7
export LD_LIBRARY_PATH=/home/laekov/.local/lib/python3.7/site-packages/torch/lib:$LD_LIBRARY_PATH
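# CUDA_VISIBLE_DEVICES is pinned to OMPI_COMM_WORLD_LOCAL_RANK (set by Open MPI)
# so each rank sees exactly one GPU; PYTHONPATH points at the in-tree build of
# the moe_cuda extension (adjust the lib path for your Python version).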
if [ -z $1 ]
then
python3 moe_test.py 2>logs/$OMPI_COMM_WORLD_RANK.log
elif [ .$1 = '.test_all' ]
then
for bs in 4 16 64
do
for inf in 1024 4096
do
for ouf in 1024 4096
do
for nexp in 4 16 64
do
echo $bs $nexp ${inf}x${ouf}
python moe_test.py $bs $inf $ouf $nexp
done
done
done
done
else
python3 $@ 2>logs/$OMPI_COMM_WORLD_RANK.log
fi
@@ -11,13 +11,21 @@ setup(
name='moe_cuda',
sources=[
'moe.cpp',
'cuda_stream_manager.cpp',
'moe_cuda_kernel.cu',
],
extra_compile_args={
'cxx': [
'-I{}'.format(CUDA_HELPER),
'-DMOE_USE_NCCL'
],
'nvcc': [
'-I{}'.format(CUDA_HELPER),
'-DMOE_USE_NCCL'
]
}
)
],
cmdclass={
'build_ext': BuildExtension
})
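# Note: -DMOE_USE_NCCL above compiles in the distributed path (expert_exchange,
# global_scatter, global_gather); building with it assumes MPI and NCCL headers
# and libraries are available to the compiler.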