Commit eadbbe09 authored by 401qingkong

push rocm deepspeed v0.3.13

parent ab5534fc
@@ -4,8 +4,6 @@
 [![License MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://github.com/Microsoft/DeepSpeed/blob/master/LICENSE)
 [![Docker Pulls](https://img.shields.io/docker/pulls/deepspeed/deepspeed)](https://hub.docker.com/r/deepspeed/deepspeed)
-### 03/2021: DeepSpeed is hiring! Come join us: [SDE 2](https://careers.microsoft.com/us/en/job/1013160/Software-Engineer-2), [Sr. SDE](https://careers.microsoft.com/us/en/job/1017151/Senior-Software-Engineer), [Sr. Researcher](https://careers.microsoft.com/us/en/job/1016440/Senior-Researcher)
 [DeepSpeed](https://www.deepspeed.ai/) is a deep learning optimization
 library that makes distributed training easy, efficient, and effective.
@@ -33,11 +31,7 @@ information [here](https://innovation.microsoft.com/en-us/exploring-ai-at-scale)
 # News
-* [2021/04/01] [[DeepSpeed on AzureML] Transformers and CIFAR examples are now available on AzureML GitHub](https://github.com/Azure/azureml-examples/tree/main/workflows/train/deepspeed)
-* [2021/03/30] [[PyTorch Lightning Blog] Accessible Multi-Billion Parameter Model Training with PyTorch Lightning + DeepSpeed](https://medium.com/pytorch-lightning/accessible-multi-billion-parameter-model-training-with-pytorch-lightning-deepspeed-c9333ac3bb59)
-* [2021/03/16] [1-bit Adam v2: NCCL-based implementation and more](https://www.deepspeed.ai/tutorials/onebit-adam/)
 * [2021/03/08] [ZeRO-3 Offload: Scale your models to trillion parameters without code changes while leveraging both CPUs & GPUs](https://www.deepspeed.ai/news/2021/03/07/zero3-offload.html)
-* [2021/01/19] [[🤗Hugging Face Blog] Fit More and Train Faster With ZeRO via DeepSpeed and FairScale](https://huggingface.co/blog/zero-deepspeed-fairscale)
 * [2020/11/12] [Simplified install, JIT compiled ops, PyPI releases, and reduced dependencies](#installation)
 * [2020/11/10] [Efficient and robust compressed training through progressive layer dropping](https://www.deepspeed.ai/news/2020/10/28/progressive-layer-dropping-news.html)
 * [2020/09/10] [DeepSpeed v0.3: Extreme-scale model training for everyone](https://www.microsoft.com/en-us/research/blog/deepspeed-extreme-scale-model-training-for-everyone/)
@@ -45,6 +39,7 @@ information [here](https://innovation.microsoft.com/en-us/exploring-ai-at-scale)
 * [Training a trillion parameters with pipeline parallelism](https://www.deepspeed.ai/news/2020/09/08/pipeline-parallelism.html)
 * [Up to 5x less communication and 3.4x faster training through 1-bit Adam](https://www.deepspeed.ai/news/2020/09/08/onebit-adam-news.html)
 * [10x bigger model training on a single GPU with ZeRO-Offload](https://www.deepspeed.ai/news/2020/09/08/ZeRO-Offload.html)
+* [2020/08/07] [DeepSpeed Microsoft Research Webinar](https://note.microsoft.com/MSR-Webinar-DeepSpeed-Registration-On-Demand.html) is now available on-demand
 # Table of Contents
"""
Copyright 2020 The Microsoft DeepSpeed Team
DeepSpeed library
Create a new wheel via the following command: python setup.py bdist_wheel
The wheel will be located at: dist/*.whl
"""
import os
import shutil
import subprocess
import warnings
from setuptools import setup, find_packages
import time
try:
import torch
from torch.utils.cpp_extension import BuildExtension, CUDAExtension, CppExtension
from torch.utils.hipify import hipify_python
except ImportError:
raise ImportError('Unable to import torch, please visit https://pytorch.org/ '
'to see how to properly install torch on your system.')
###aiss add
is_rocm_pytorch = False
if tuple(int(v) for v in torch.__version__.split('.')[:2]) >= (1, 5):
    from torch.utils.cpp_extension import ROCM_HOME
    is_rocm_pytorch = (torch.version.hip is not None) and (ROCM_HOME is not None)
from op_builder import ALL_OPS, get_default_compute_capatabilities
def fetch_requirements(path):
with open(path, 'r') as fd:
return [r.strip() for r in fd.readlines()]
install_requires = fetch_requirements('requirements/requirements.txt')
extras_require = {
'1bit_adam': fetch_requirements('requirements/requirements-1bit-adam.txt'),
'readthedocs': fetch_requirements('requirements/requirements-readthedocs.txt'),
'dev': fetch_requirements('requirements/requirements-dev.txt'),
}
###aiss add ################
if is_rocm_pytorch:
print("NOTE: Please manually install torch and torchvision packages for ROCm")
install_requires = fetch_requirements('requirements/requirements-rocm.txt')
# If MPI is available add 1bit-adam requirements
if torch.cuda.is_available():
if shutil.which('ompi_info') or shutil.which('mpiname'):
cupy = f"cupy-cuda{torch.version.cuda.replace('.','')[:3]}"
extras_require['1bit_adam'].append(cupy)
# Make an [all] extra that installs all needed dependencies
all_extras = set()
for extra_reqs in extras_require.values():
    all_extras.update(extra_reqs)
extras_require['all'] = list(all_extras)
cmdclass = {}
# For any pre-installed ops force disable ninja
cmdclass['build_ext'] = BuildExtension.with_options(use_ninja=False)
TORCH_MAJOR = torch.__version__.split('.')[0]
TORCH_MINOR = torch.__version__.split('.')[1]
if not torch.cuda.is_available():
# Fix to allow docker builds, similar to https://github.com/NVIDIA/apex/issues/486
print(
"[WARNING] Torch did not find cuda available, if cross-compiling or running with cpu only "
"you can ignore this message. Adding compute capability for Pascal, Volta, and Turing "
"(compute capabilities 6.0, 6.1, 6.2)")
if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None:
os.environ["TORCH_CUDA_ARCH_LIST"] = get_default_compute_capatabilities()
###########aiss add ###################### this hipify pass only needs to run once to convert the CUDA sources to HIP
#if is_rocm_pytorch:
# import shutil
# this_dir = os.path.dirname(os.path.abspath(__file__))
# hipify_python.hipify(project_directory=this_dir, output_directory=this_dir, includes="csrc/*",
# show_detailed=True, is_pytorch_extension=True)
# print("cuda file has been transformed to hip format!!!")
ext_modules = []
# Default to pre-install kernels to false so we rely on JIT
BUILD_OP_DEFAULT = int(os.environ.get('DS_BUILD_OPS', 0))
print(f"DS_BUILD_OPS={BUILD_OP_DEFAULT}")
def command_exists(cmd):
result = subprocess.Popen(f'type {cmd}', stdout=subprocess.PIPE, shell=True)
return result.wait() == 0
def op_enabled(op_name):
assert hasattr(ALL_OPS[op_name], 'BUILD_VAR'), \
f"{op_name} is missing BUILD_VAR field"
env_var = ALL_OPS[op_name].BUILD_VAR
return int(os.environ.get(env_var, BUILD_OP_DEFAULT))
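# Illustrative only: every op exposes its own BUILD_VAR, so a single op can be
# pre-built without turning on DS_BUILD_OPS globally. The exact flag name below is
# assumed from the op_builder naming convention, e.g. for the CPU Adam op:
#
#   DS_BUILD_CPU_ADAM=1 python setup.py bdist_wheel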
install_ops = dict.fromkeys(ALL_OPS.keys(), False)
for op_name, builder in ALL_OPS.items():
op_compatible = builder.is_compatible()
# If op is compatible update install reqs so it can potentially build/run later
if op_compatible:
        install_requires += builder.python_requirements()
# If op install enabled, add builder to extensions
if op_enabled(op_name) and op_compatible:
install_ops[op_name] = op_enabled(op_name)
ext_modules.append(builder.builder())
compatible_ops = {op_name: op.is_compatible() for (op_name, op) in ALL_OPS.items()}
print(f'Install Ops={install_ops}')
# Write out version/git info
git_hash_cmd = "git rev-parse --short HEAD"
git_branch_cmd = "git rev-parse --abbrev-ref HEAD"
if command_exists('git') and 'DS_BUILD_STRING' not in os.environ:
try:
result = subprocess.check_output(git_hash_cmd, shell=True)
git_hash = result.decode('utf-8').strip()
result = subprocess.check_output(git_branch_cmd, shell=True)
git_branch = result.decode('utf-8').strip()
except subprocess.CalledProcessError:
git_hash = "unknown"
git_branch = "unknown"
else:
git_hash = "unknown"
git_branch = "unknown"
# Parse the DeepSpeed version string from version.txt
version_str = open('version.txt', 'r').read().strip()
# Build specifiers like .devX can be added at install time. Otherwise, add the git hash.
# example: DS_BUILD_STRING=".dev20201022" python setup.py sdist bdist_wheel
# Building wheel for distribution, update version file
if 'DS_BUILD_STRING' in os.environ:
# Build string env specified, probably building for distribution
with open('build.txt', 'w') as fd:
fd.write(os.environ.get('DS_BUILD_STRING'))
version_str += os.environ.get('DS_BUILD_STRING')
elif os.path.isfile('build.txt'):
# build.txt exists, probably installing from distribution
with open('build.txt', 'r') as fd:
version_str += fd.read().strip()
else:
# None of the above, probably installing from source
version_str += f'+{git_hash}'
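# For example (the hash below is a placeholder): a source install of version.txt
# "0.3.13" at git hash abc1234 yields version_str == "0.3.13+abc1234", while building
# with DS_BUILD_STRING=".dev20201022" yields "0.3.13.dev20201022".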
torch_version = ".".join([TORCH_MAJOR, TORCH_MINOR])
# Set cuda_version to 0.0 if cpu-only
cuda_version = "0.0"
if torch.version.cuda is not None:
cuda_version = ".".join(torch.version.cuda.split('.')[:2])
torch_info = {"version": torch_version, "cuda_version": cuda_version}
print(f"version={version_str}, git_hash={git_hash}, git_branch={git_branch}")
with open('deepspeed/git_version_info_installed.py', 'w') as fd:
fd.write(f"version='{version_str}'\n")
fd.write(f"git_hash='{git_hash}'\n")
fd.write(f"git_branch='{git_branch}'\n")
fd.write(f"installed_ops={install_ops}\n")
fd.write(f"compatible_ops={compatible_ops}\n")
fd.write(f"torch_info={torch_info}\n")
print(f'install_requires={install_requires}')
print(f'compatible_ops={compatible_ops}')
print(f'ext_modules={ext_modules}')
# Parse README.md to make long_description for PyPI page.
thisdir = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(thisdir, 'README.md'), encoding='utf-8') as fin:
readme_text = fin.read()
start_time = time.time()
setup(name='deepspeed',
version=version_str,
description='DeepSpeed library',
long_description=readme_text,
long_description_content_type='text/markdown',
author='DeepSpeed Team',
author_email='deepspeed@microsoft.com',
url='http://deepspeed.ai',
install_requires=install_requires,
extras_require=extras_require,
packages=find_packages(exclude=["docker",
"third_party"]),
include_package_data=True,
scripts=[
'bin/deepspeed',
'bin/deepspeed.pt',
'bin/ds',
'bin/ds_ssh',
'bin/ds_report',
'bin/ds_elastic'
],
classifiers=[
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8'
],
license='MIT',
ext_modules=ext_modules,
cmdclass=cmdclass)
end_time = time.time()
print(f'deepspeed build time = {end_time - start_time} secs')
ds
\ No newline at end of file
#!/usr/bin/env python
from deepspeed.launcher.runner import main
if __name__ == '__main__':
main()
ds
\ No newline at end of file
#!/usr/bin/env python
from deepspeed.launcher.runner import main
if __name__ == '__main__':
main()
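# Hedged usage sketch for the launcher entry points above; train.py, the GPU count,
# and ds_config.json are placeholders, not files that ship with this commit:
#
#   deepspeed --num_gpus 2 train.py --deepspeed --deepspeed_config ds_config.json
#
# `ds` is simply a shorter alias for the same deepspeed.launcher.runner entry point.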
/* Copyright 2020 The Microsoft DeepSpeed Team
Copyright NVIDIA/apex
This file is adapted from fused adam in NVIDIA/apex, commit a109f85
*/
#ifndef TORCH_CHECK
#define TORCH_CHECK AT_CHECK
#endif
#ifdef VERSION_GE_1_3
#define DATA_PTR data_ptr
#else
#define DATA_PTR data
#endif
#include <hip/hip_runtime_api.h>
#include <math.h>
#include <omp.h>
#include <torch/extension.h>
#include <iostream>
#include <memory>
#include <type_traits>
#include <unordered_map>
#include "rocblas.h"
#include "hip/hip_runtime.h"
#include "hiprand.h"
#ifdef __HIP_PLATFORM_HCC__
#include "hip/custom_hip_layers.h"
#include "hip/cpu_adam.h"
#else
#include "custom_cuda_layers.h"
#include "cpu_adam.h"
#endif
static std::unordered_map<int, std::shared_ptr<void>> s_optimizers;
#define ROUND_DOWN(size, step) ((size) & ~((step)-1))
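// ROUND_DOWN assumes `step` is a power of two: it clears the low bits, so e.g.
// ROUND_DOWN(1003, 8) == 1000. The Step* methods use it to get the largest multiple of
// the SIMD stride that fits in _param_size; the scalar loop then handles the remainder.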
// C++ interface
void Adam_Optimizer::Step(float* _params,
float* grads,
float* _exp_avg,
float* _exp_avg_sq,
size_t _param_size,
__half* dev_params)
{
float betta1_minus1 = 1 - _betta1;
float betta2_minus1 = 1 - _betta2;
float step_size = -1 * _alpha / _bias_correction1;
float w_decay = -1 * _alpha * _weight_decay;
size_t rounded_size = 0;
#if defined(__AVX512__) or defined(__AVX256__)
AVX_Data betta1_4;
betta1_4.data = SIMD_SET(_betta1);
AVX_Data betta2_4;
betta2_4.data = SIMD_SET(_betta2);
AVX_Data betta1_minus1_4;
betta1_minus1_4.data = SIMD_SET(betta1_minus1);
AVX_Data betta2_minus1_4;
betta2_minus1_4.data = SIMD_SET(betta2_minus1);
AVX_Data bias2_sqrt;
bias2_sqrt.data = SIMD_SET(_bias_correction2);
AVX_Data eps_4;
eps_4.data = SIMD_SET(_eps);
AVX_Data step_size_4;
step_size_4.data = SIMD_SET(step_size);
AVX_Data weight_decay4;
if (_weight_decay > 0)
weight_decay4.data = (_adamw_mode ? SIMD_SET(w_decay) : SIMD_SET(_weight_decay));
rounded_size = ROUND_DOWN(_param_size, SIMD_WIDTH);
for (size_t t = 0; t < rounded_size; t += TILE) {
size_t copy_size = TILE;
if ((t + TILE) > rounded_size) copy_size = rounded_size - t;
size_t offset = copy_size + t;
if ((t / TILE) >= 2) { hipStreamSynchronize(_streams[_buf_index]); }
#pragma omp parallel for
for (size_t i = t; i < offset; i += SIMD_WIDTH) {
AVX_Data grad_4;
grad_4.data = SIMD_LOAD(grads + i);
AVX_Data momentum_4;
momentum_4.data = SIMD_LOAD(_exp_avg + i);
AVX_Data variance_4;
variance_4.data = SIMD_LOAD(_exp_avg_sq + i);
AVX_Data param_4;
param_4.data = SIMD_LOAD(_params + i);
if (_weight_decay > 0 && !_adamw_mode) {
grad_4.data = SIMD_FMA(param_4.data, weight_decay4.data, grad_4.data);
}
momentum_4.data = SIMD_MUL(momentum_4.data, betta1_4.data);
momentum_4.data = SIMD_FMA(grad_4.data, betta1_minus1_4.data, momentum_4.data);
variance_4.data = SIMD_MUL(variance_4.data, betta2_4.data);
grad_4.data = SIMD_MUL(grad_4.data, grad_4.data);
variance_4.data = SIMD_FMA(grad_4.data, betta2_minus1_4.data, variance_4.data);
grad_4.data = SIMD_SQRT(variance_4.data);
grad_4.data = SIMD_FMA(grad_4.data, bias2_sqrt.data, eps_4.data);
grad_4.data = SIMD_DIV(momentum_4.data, grad_4.data);
if (_weight_decay > 0 && _adamw_mode) {
param_4.data = SIMD_FMA(param_4.data, weight_decay4.data, param_4.data);
}
param_4.data = SIMD_FMA(grad_4.data, step_size_4.data, param_4.data);
SIMD_STORE(_params + i, param_4.data);
if (dev_params) SIMD_STORE(_doubled_buffer[_buf_index] + (i - t), param_4.data);
SIMD_STORE(_exp_avg + i, momentum_4.data);
SIMD_STORE(_exp_avg_sq + i, variance_4.data);
}
if (dev_params) {
launch_param_update(
_doubled_buffer[_buf_index], dev_params + t, copy_size, _streams[_buf_index]);
_buf_index = !_buf_index;
}
}
#endif
if (_param_size > rounded_size) {
for (size_t t = rounded_size; t < _param_size; t += TILE) {
size_t copy_size = TILE;
if ((t + TILE) > _param_size) copy_size = _param_size - t;
size_t offset = copy_size + t;
if ((t / TILE) >= 2) { hipStreamSynchronize(_streams[_buf_index]); }
#pragma omp parallel for
for (size_t k = t; k < offset; k++) {
float grad = grads[k];
float param = _params[k];
float momentum = _exp_avg[k];
float variance = _exp_avg_sq[k];
if (_weight_decay > 0 && !_adamw_mode) { grad = param * _weight_decay + grad; }
momentum = momentum * _betta1;
momentum = grad * betta1_minus1 + momentum;
variance = variance * _betta2;
grad = grad * grad;
variance = grad * betta2_minus1 + variance;
grad = sqrt(variance);
grad = grad * _bias_correction2 + _eps;
grad = momentum / grad;
if (_weight_decay > 0 && _adamw_mode) { param += w_decay * param; }
param = grad * step_size + param;
if (dev_params) _doubled_buffer[_buf_index][k - t] = param;
_params[k] = param;
_exp_avg[k] = momentum;
_exp_avg_sq[k] = variance;
}
if (dev_params) {
launch_param_update(
_doubled_buffer[_buf_index], dev_params + t, (copy_size), _streams[_buf_index]);
_buf_index = !_buf_index;
}
}
}
}
void Adam_Optimizer::Step_4(float* _params,
float* grads,
float* _exp_avg,
float* _exp_avg_sq,
size_t _param_size,
__half* dev_params)
{
size_t rounded_size = 0;
#if defined(__AVX512__) or defined(__AVX256__)
AVX_Data betta1_4;
betta1_4.data = SIMD_SET(_betta1);
AVX_Data betta2_4;
betta2_4.data = SIMD_SET(_betta2);
float betta1_minus1 = 1 - _betta1;
float betta2_minus1 = 1 - _betta2;
AVX_Data betta1_minus1_4;
betta1_minus1_4.data = SIMD_SET(betta1_minus1);
AVX_Data betta2_minus1_4;
betta2_minus1_4.data = SIMD_SET(betta2_minus1);
AVX_Data bias2_sqrt;
bias2_sqrt.data = SIMD_SET(_bias_correction2);
AVX_Data eps_4;
eps_4.data = SIMD_SET(_eps);
float step_size = -1 * _alpha / _bias_correction1;
AVX_Data step_size_4;
step_size_4.data = SIMD_SET(step_size);
float w_decay = -1 * _alpha * _weight_decay;
AVX_Data weight_decay4;
if (_weight_decay > 0)
weight_decay4.data = (_adamw_mode ? SIMD_SET(w_decay) : SIMD_SET(_weight_decay));
rounded_size = ROUND_DOWN(_param_size, (SIMD_WIDTH << 2));
for (size_t t = 0; t < rounded_size; t += TILE) {
size_t copy_size = TILE;
if ((t + TILE) > rounded_size) copy_size = rounded_size - t;
size_t offset = copy_size + t;
if ((t / TILE) >= 2) { hipStreamSynchronize(_streams[_buf_index]); }
#pragma omp parallel for
for (size_t i = t; i < offset; i += (SIMD_WIDTH << 2)) {
AVX_Data grad_4[4];
grad_4[0].data = SIMD_LOAD(grads + i);
grad_4[1].data = SIMD_LOAD(grads + i + SIMD_WIDTH);
grad_4[2].data = SIMD_LOAD(grads + i + (SIMD_WIDTH << 1));
grad_4[3].data = SIMD_LOAD(grads + i + SIMD_WIDTH * 3);
AVX_Data momentum_4[4];
momentum_4[0].data = SIMD_LOAD(_exp_avg + i);
momentum_4[1].data = SIMD_LOAD(_exp_avg + i + SIMD_WIDTH);
momentum_4[2].data = SIMD_LOAD(_exp_avg + i + (SIMD_WIDTH << 1));
momentum_4[3].data = SIMD_LOAD(_exp_avg + i + SIMD_WIDTH * 3);
AVX_Data variance_4[4];
variance_4[0].data = SIMD_LOAD(_exp_avg_sq + i);
variance_4[1].data = SIMD_LOAD(_exp_avg_sq + i + SIMD_WIDTH);
variance_4[2].data = SIMD_LOAD(_exp_avg_sq + i + (SIMD_WIDTH << 1));
variance_4[3].data = SIMD_LOAD(_exp_avg_sq + i + SIMD_WIDTH * 3);
AVX_Data param_4[4];
param_4[0].data = SIMD_LOAD(_params + i);
param_4[1].data = SIMD_LOAD(_params + i + SIMD_WIDTH);
param_4[2].data = SIMD_LOAD(_params + i + (SIMD_WIDTH << 1));
param_4[3].data = SIMD_LOAD(_params + i + SIMD_WIDTH * 3);
if (_weight_decay > 0 && !_adamw_mode) {
grad_4[0].data = SIMD_FMA(param_4[0].data, weight_decay4.data, grad_4[0].data);
grad_4[1].data = SIMD_FMA(param_4[1].data, weight_decay4.data, grad_4[1].data);
grad_4[2].data = SIMD_FMA(param_4[2].data, weight_decay4.data, grad_4[2].data);
grad_4[3].data = SIMD_FMA(param_4[3].data, weight_decay4.data, grad_4[3].data);
}
momentum_4[0].data = SIMD_MUL(momentum_4[0].data, betta1_4.data);
momentum_4[0].data = SIMD_FMA(grad_4[0].data, betta1_minus1_4.data, momentum_4[0].data);
momentum_4[1].data = SIMD_MUL(momentum_4[1].data, betta1_4.data);
momentum_4[1].data = SIMD_FMA(grad_4[1].data, betta1_minus1_4.data, momentum_4[1].data);
momentum_4[2].data = SIMD_MUL(momentum_4[2].data, betta1_4.data);
momentum_4[2].data = SIMD_FMA(grad_4[2].data, betta1_minus1_4.data, momentum_4[2].data);
momentum_4[3].data = SIMD_MUL(momentum_4[3].data, betta1_4.data);
momentum_4[3].data = SIMD_FMA(grad_4[3].data, betta1_minus1_4.data, momentum_4[3].data);
variance_4[0].data = SIMD_MUL(variance_4[0].data, betta2_4.data);
variance_4[1].data = SIMD_MUL(variance_4[1].data, betta2_4.data);
variance_4[2].data = SIMD_MUL(variance_4[2].data, betta2_4.data);
variance_4[3].data = SIMD_MUL(variance_4[3].data, betta2_4.data);
grad_4[0].data = SIMD_MUL(grad_4[0].data, grad_4[0].data);
grad_4[1].data = SIMD_MUL(grad_4[1].data, grad_4[1].data);
grad_4[2].data = SIMD_MUL(grad_4[2].data, grad_4[2].data);
grad_4[3].data = SIMD_MUL(grad_4[3].data, grad_4[3].data);
variance_4[0].data = SIMD_FMA(grad_4[0].data, betta2_minus1_4.data, variance_4[0].data);
variance_4[1].data = SIMD_FMA(grad_4[1].data, betta2_minus1_4.data, variance_4[1].data);
variance_4[2].data = SIMD_FMA(grad_4[2].data, betta2_minus1_4.data, variance_4[2].data);
variance_4[3].data = SIMD_FMA(grad_4[3].data, betta2_minus1_4.data, variance_4[3].data);
grad_4[0].data = SIMD_SQRT(variance_4[0].data);
grad_4[1].data = SIMD_SQRT(variance_4[1].data);
grad_4[2].data = SIMD_SQRT(variance_4[2].data);
grad_4[3].data = SIMD_SQRT(variance_4[3].data);
grad_4[0].data = SIMD_FMA(grad_4[0].data, bias2_sqrt.data, eps_4.data);
grad_4[1].data = SIMD_FMA(grad_4[1].data, bias2_sqrt.data, eps_4.data);
grad_4[2].data = SIMD_FMA(grad_4[2].data, bias2_sqrt.data, eps_4.data);
grad_4[3].data = SIMD_FMA(grad_4[3].data, bias2_sqrt.data, eps_4.data);
grad_4[0].data = SIMD_DIV(momentum_4[0].data, grad_4[0].data);
grad_4[1].data = SIMD_DIV(momentum_4[1].data, grad_4[1].data);
grad_4[2].data = SIMD_DIV(momentum_4[2].data, grad_4[2].data);
grad_4[3].data = SIMD_DIV(momentum_4[3].data, grad_4[3].data);
if (_weight_decay > 0 && _adamw_mode) {
param_4[0].data = SIMD_FMA(param_4[0].data, weight_decay4.data, param_4[0].data);
param_4[1].data = SIMD_FMA(param_4[1].data, weight_decay4.data, param_4[1].data);
param_4[2].data = SIMD_FMA(param_4[2].data, weight_decay4.data, param_4[2].data);
param_4[3].data = SIMD_FMA(param_4[3].data, weight_decay4.data, param_4[3].data);
}
param_4[0].data = SIMD_FMA(grad_4[0].data, step_size_4.data, param_4[0].data);
param_4[1].data = SIMD_FMA(grad_4[1].data, step_size_4.data, param_4[1].data);
param_4[2].data = SIMD_FMA(grad_4[2].data, step_size_4.data, param_4[2].data);
param_4[3].data = SIMD_FMA(grad_4[3].data, step_size_4.data, param_4[3].data);
SIMD_STORE(_params + i, param_4[0].data);
SIMD_STORE(_params + i + SIMD_WIDTH, param_4[1].data);
SIMD_STORE(_params + i + (SIMD_WIDTH << 1), param_4[2].data);
SIMD_STORE(_params + i + SIMD_WIDTH * 3, param_4[3].data);
if (dev_params) {
SIMD_STORE(_doubled_buffer[_buf_index] + (i - t), param_4[0].data);
SIMD_STORE(_doubled_buffer[_buf_index] + (i - t) + SIMD_WIDTH, param_4[1].data);
SIMD_STORE(_doubled_buffer[_buf_index] + (i - t) + (SIMD_WIDTH << 1),
param_4[2].data);
SIMD_STORE(_doubled_buffer[_buf_index] + (i - t) + SIMD_WIDTH * 3, param_4[3].data);
}
SIMD_STORE(_exp_avg + i, momentum_4[0].data);
SIMD_STORE(_exp_avg + i + SIMD_WIDTH, momentum_4[1].data);
SIMD_STORE(_exp_avg + i + (SIMD_WIDTH << 1), momentum_4[2].data);
SIMD_STORE(_exp_avg + i + SIMD_WIDTH * 3, momentum_4[3].data);
SIMD_STORE(_exp_avg_sq + i, variance_4[0].data);
SIMD_STORE(_exp_avg_sq + i + SIMD_WIDTH, variance_4[1].data);
SIMD_STORE(_exp_avg_sq + i + (SIMD_WIDTH << 1), variance_4[2].data);
SIMD_STORE(_exp_avg_sq + i + SIMD_WIDTH * 3, variance_4[3].data);
}
if (dev_params) {
launch_param_update(
_doubled_buffer[_buf_index], dev_params + t, copy_size, _streams[_buf_index]);
_buf_index = !_buf_index;
}
}
#endif
if (_param_size > rounded_size)
Step((_params + rounded_size),
(grads + rounded_size),
(_exp_avg + rounded_size),
(_exp_avg_sq + rounded_size),
(_param_size - rounded_size),
(dev_params != nullptr ? (dev_params + rounded_size) : dev_params));
}
int create_adam_optimizer(int optimizer_id,
float alpha = 1e-3,
float betta1 = 0.9,
float betta2 = 0.999,
float eps = 1e-8,
float weight_decay = 0,
bool adamw_mode = true)
{
auto opt =
std::make_shared<Adam_Optimizer>(alpha, betta1, betta2, eps, weight_decay, adamw_mode);
s_optimizers[optimizer_id] = opt;
#if defined(__AVX512__)
std::cout << "Adam Optimizer #" << optimizer_id
<< " is created with AVX512 arithmetic capability." << std::endl;
printf("Config: alpha=%f, betas=(%f, %f), weight_decay=%f, adam_w=%d\n",
alpha,
betta1,
betta2,
weight_decay,
(int)adamw_mode);
#else
#if defined(__AVX256__)
std::cout << "Adam Optimizer #" << optimizer_id
<< " is created with AVX2 arithmetic capability." << std::endl;
printf("Config: alpha=%f, betas=(%f, %f), weight_decay=%f, adam_w=%d\n",
alpha,
betta1,
betta2,
weight_decay,
(int)adamw_mode);
#else
std::cout << "Adam Optimizer #" << optimizer_id
<< " is created with scalar arithmetic capability." << std::endl;
printf("Config: alpha=%f, betas=(%f, %f), weight_decay=%f, adam_w=%d\n",
alpha,
betta1,
betta2,
weight_decay,
(int)adamw_mode);
#endif
#endif
return 0;
}
void Adam_Optimizer::Step_8(float* _params,
float* grads,
float* _exp_avg,
float* _exp_avg_sq,
size_t _param_size,
__half* dev_params)
{
size_t rounded_size = 0;
#if defined(__AVX512__) or defined(__AVX256__)
AVX_Data betta1_4;
betta1_4.data = SIMD_SET(_betta1);
AVX_Data betta2_4;
betta2_4.data = SIMD_SET(_betta2);
float betta1_minus1 = 1 - _betta1;
float betta2_minus1 = 1 - _betta2;
AVX_Data betta1_minus1_4;
betta1_minus1_4.data = SIMD_SET(betta1_minus1);
AVX_Data betta2_minus1_4;
betta2_minus1_4.data = SIMD_SET(betta2_minus1);
AVX_Data bias2_sqrt;
bias2_sqrt.data = SIMD_SET(_bias_correction2);
AVX_Data eps_4;
eps_4.data = SIMD_SET(_eps);
float step_size = -1 * _alpha / _bias_correction1;
AVX_Data step_size_4;
step_size_4.data = SIMD_SET(step_size);
float w_decay = -1 * _alpha * _weight_decay;
AVX_Data weight_decay4;
if (_weight_decay > 0)
weight_decay4.data = (_adamw_mode ? SIMD_SET(w_decay) : SIMD_SET(_weight_decay));
rounded_size = ROUND_DOWN(_param_size, (SIMD_WIDTH << 3));
for (size_t t = 0; t < rounded_size; t += TILE) {
size_t copy_size = TILE;
if ((t + TILE) > rounded_size) copy_size = rounded_size - t;
size_t offset = copy_size + t;
if ((t / TILE) >= 2) { hipStreamSynchronize(_streams[_buf_index]); }
#pragma omp parallel for
for (size_t i = t; i < offset; i += (SIMD_WIDTH << 3)) {
AVX_Data grad_4[8];
grad_4[0].data = SIMD_LOAD(grads + i);
grad_4[1].data = SIMD_LOAD(grads + i + SIMD_WIDTH);
grad_4[2].data = SIMD_LOAD(grads + i + (SIMD_WIDTH << 1));
grad_4[3].data = SIMD_LOAD(grads + i + SIMD_WIDTH * 3);
grad_4[4].data = SIMD_LOAD(grads + i + (SIMD_WIDTH << 2));
grad_4[5].data = SIMD_LOAD(grads + i + SIMD_WIDTH * 5);
grad_4[6].data = SIMD_LOAD(grads + i + SIMD_WIDTH * 6);
grad_4[7].data = SIMD_LOAD(grads + i + SIMD_WIDTH * 7);
AVX_Data momentum_4[8];
momentum_4[0].data = SIMD_LOAD(_exp_avg + i);
momentum_4[1].data = SIMD_LOAD(_exp_avg + i + SIMD_WIDTH);
momentum_4[2].data = SIMD_LOAD(_exp_avg + i + (SIMD_WIDTH << 1));
momentum_4[3].data = SIMD_LOAD(_exp_avg + i + SIMD_WIDTH * 3);
momentum_4[4].data = SIMD_LOAD(_exp_avg + i + (SIMD_WIDTH << 2));
momentum_4[5].data = SIMD_LOAD(_exp_avg + i + SIMD_WIDTH * 5);
momentum_4[6].data = SIMD_LOAD(_exp_avg + i + SIMD_WIDTH * 6);
momentum_4[7].data = SIMD_LOAD(_exp_avg + i + SIMD_WIDTH * 7);
AVX_Data variance_4[8];
variance_4[0].data = SIMD_LOAD(_exp_avg_sq + i);
variance_4[1].data = SIMD_LOAD(_exp_avg_sq + i + SIMD_WIDTH);
variance_4[2].data = SIMD_LOAD(_exp_avg_sq + i + (SIMD_WIDTH << 1));
variance_4[3].data = SIMD_LOAD(_exp_avg_sq + i + SIMD_WIDTH * 3);
variance_4[4].data = SIMD_LOAD(_exp_avg_sq + i + (SIMD_WIDTH << 2));
variance_4[5].data = SIMD_LOAD(_exp_avg_sq + i + SIMD_WIDTH * 5);
variance_4[6].data = SIMD_LOAD(_exp_avg_sq + i + SIMD_WIDTH * 6);
variance_4[7].data = SIMD_LOAD(_exp_avg_sq + i + SIMD_WIDTH * 7);
AVX_Data param_4[8];
param_4[0].data = SIMD_LOAD(_params + i);
param_4[1].data = SIMD_LOAD(_params + i + SIMD_WIDTH);
param_4[2].data = SIMD_LOAD(_params + i + (SIMD_WIDTH << 1));
param_4[3].data = SIMD_LOAD(_params + i + SIMD_WIDTH * 3);
param_4[4].data = SIMD_LOAD(_params + i + (SIMD_WIDTH << 2));
param_4[5].data = SIMD_LOAD(_params + i + SIMD_WIDTH * 5);
param_4[6].data = SIMD_LOAD(_params + i + SIMD_WIDTH * 6);
param_4[7].data = SIMD_LOAD(_params + i + SIMD_WIDTH * 7);
if (_weight_decay > 0 && !_adamw_mode) {
grad_4[0].data = SIMD_FMA(param_4[0].data, weight_decay4.data, grad_4[0].data);
grad_4[1].data = SIMD_FMA(param_4[1].data, weight_decay4.data, grad_4[1].data);
grad_4[2].data = SIMD_FMA(param_4[2].data, weight_decay4.data, grad_4[2].data);
grad_4[3].data = SIMD_FMA(param_4[3].data, weight_decay4.data, grad_4[3].data);
grad_4[4].data = SIMD_FMA(param_4[4].data, weight_decay4.data, grad_4[4].data);
grad_4[5].data = SIMD_FMA(param_4[5].data, weight_decay4.data, grad_4[5].data);
grad_4[6].data = SIMD_FMA(param_4[6].data, weight_decay4.data, grad_4[6].data);
grad_4[7].data = SIMD_FMA(param_4[7].data, weight_decay4.data, grad_4[7].data);
}
momentum_4[0].data = SIMD_MUL(momentum_4[0].data, betta1_4.data);
momentum_4[0].data = SIMD_FMA(grad_4[0].data, betta1_minus1_4.data, momentum_4[0].data);
momentum_4[1].data = SIMD_MUL(momentum_4[1].data, betta1_4.data);
momentum_4[1].data = SIMD_FMA(grad_4[1].data, betta1_minus1_4.data, momentum_4[1].data);
momentum_4[2].data = SIMD_MUL(momentum_4[2].data, betta1_4.data);
momentum_4[2].data = SIMD_FMA(grad_4[2].data, betta1_minus1_4.data, momentum_4[2].data);
momentum_4[3].data = SIMD_MUL(momentum_4[3].data, betta1_4.data);
momentum_4[3].data = SIMD_FMA(grad_4[3].data, betta1_minus1_4.data, momentum_4[3].data);
momentum_4[4].data = SIMD_MUL(momentum_4[4].data, betta1_4.data);
momentum_4[4].data = SIMD_FMA(grad_4[4].data, betta1_minus1_4.data, momentum_4[4].data);
momentum_4[5].data = SIMD_MUL(momentum_4[5].data, betta1_4.data);
momentum_4[5].data = SIMD_FMA(grad_4[5].data, betta1_minus1_4.data, momentum_4[5].data);
momentum_4[6].data = SIMD_MUL(momentum_4[6].data, betta1_4.data);
momentum_4[6].data = SIMD_FMA(grad_4[6].data, betta1_minus1_4.data, momentum_4[6].data);
momentum_4[7].data = SIMD_MUL(momentum_4[7].data, betta1_4.data);
momentum_4[7].data = SIMD_FMA(grad_4[7].data, betta1_minus1_4.data, momentum_4[7].data);
variance_4[0].data = SIMD_MUL(variance_4[0].data, betta2_4.data);
variance_4[1].data = SIMD_MUL(variance_4[1].data, betta2_4.data);
variance_4[2].data = SIMD_MUL(variance_4[2].data, betta2_4.data);
variance_4[3].data = SIMD_MUL(variance_4[3].data, betta2_4.data);
variance_4[4].data = SIMD_MUL(variance_4[4].data, betta2_4.data);
variance_4[5].data = SIMD_MUL(variance_4[5].data, betta2_4.data);
variance_4[6].data = SIMD_MUL(variance_4[6].data, betta2_4.data);
variance_4[7].data = SIMD_MUL(variance_4[7].data, betta2_4.data);
grad_4[0].data = SIMD_MUL(grad_4[0].data, grad_4[0].data);
grad_4[1].data = SIMD_MUL(grad_4[1].data, grad_4[1].data);
grad_4[2].data = SIMD_MUL(grad_4[2].data, grad_4[2].data);
grad_4[3].data = SIMD_MUL(grad_4[3].data, grad_4[3].data);
grad_4[4].data = SIMD_MUL(grad_4[4].data, grad_4[4].data);
grad_4[5].data = SIMD_MUL(grad_4[5].data, grad_4[5].data);
grad_4[6].data = SIMD_MUL(grad_4[6].data, grad_4[6].data);
grad_4[7].data = SIMD_MUL(grad_4[7].data, grad_4[7].data);
variance_4[0].data = SIMD_FMA(grad_4[0].data, betta2_minus1_4.data, variance_4[0].data);
variance_4[1].data = SIMD_FMA(grad_4[1].data, betta2_minus1_4.data, variance_4[1].data);
variance_4[2].data = SIMD_FMA(grad_4[2].data, betta2_minus1_4.data, variance_4[2].data);
variance_4[3].data = SIMD_FMA(grad_4[3].data, betta2_minus1_4.data, variance_4[3].data);
variance_4[4].data = SIMD_FMA(grad_4[4].data, betta2_minus1_4.data, variance_4[4].data);
variance_4[5].data = SIMD_FMA(grad_4[5].data, betta2_minus1_4.data, variance_4[5].data);
variance_4[6].data = SIMD_FMA(grad_4[6].data, betta2_minus1_4.data, variance_4[6].data);
variance_4[7].data = SIMD_FMA(grad_4[7].data, betta2_minus1_4.data, variance_4[7].data);
grad_4[0].data = SIMD_SQRT(variance_4[0].data);
grad_4[1].data = SIMD_SQRT(variance_4[1].data);
grad_4[2].data = SIMD_SQRT(variance_4[2].data);
grad_4[3].data = SIMD_SQRT(variance_4[3].data);
grad_4[4].data = SIMD_SQRT(variance_4[4].data);
grad_4[5].data = SIMD_SQRT(variance_4[5].data);
grad_4[6].data = SIMD_SQRT(variance_4[6].data);
grad_4[7].data = SIMD_SQRT(variance_4[7].data);
grad_4[0].data = SIMD_FMA(grad_4[0].data, bias2_sqrt.data, eps_4.data);
grad_4[1].data = SIMD_FMA(grad_4[1].data, bias2_sqrt.data, eps_4.data);
grad_4[2].data = SIMD_FMA(grad_4[2].data, bias2_sqrt.data, eps_4.data);
grad_4[3].data = SIMD_FMA(grad_4[3].data, bias2_sqrt.data, eps_4.data);
grad_4[4].data = SIMD_FMA(grad_4[4].data, bias2_sqrt.data, eps_4.data);
grad_4[5].data = SIMD_FMA(grad_4[5].data, bias2_sqrt.data, eps_4.data);
grad_4[6].data = SIMD_FMA(grad_4[6].data, bias2_sqrt.data, eps_4.data);
grad_4[7].data = SIMD_FMA(grad_4[7].data, bias2_sqrt.data, eps_4.data);
grad_4[0].data = SIMD_DIV(momentum_4[0].data, grad_4[0].data);
grad_4[1].data = SIMD_DIV(momentum_4[1].data, grad_4[1].data);
grad_4[2].data = SIMD_DIV(momentum_4[2].data, grad_4[2].data);
grad_4[3].data = SIMD_DIV(momentum_4[3].data, grad_4[3].data);
grad_4[4].data = SIMD_DIV(momentum_4[4].data, grad_4[4].data);
grad_4[5].data = SIMD_DIV(momentum_4[5].data, grad_4[5].data);
grad_4[6].data = SIMD_DIV(momentum_4[6].data, grad_4[6].data);
grad_4[7].data = SIMD_DIV(momentum_4[7].data, grad_4[7].data);
if (_weight_decay > 0 && _adamw_mode) {
param_4[0].data = SIMD_FMA(param_4[0].data, weight_decay4.data, param_4[0].data);
param_4[1].data = SIMD_FMA(param_4[1].data, weight_decay4.data, param_4[1].data);
param_4[2].data = SIMD_FMA(param_4[2].data, weight_decay4.data, param_4[2].data);
param_4[3].data = SIMD_FMA(param_4[3].data, weight_decay4.data, param_4[3].data);
param_4[4].data = SIMD_FMA(param_4[4].data, weight_decay4.data, param_4[4].data);
param_4[5].data = SIMD_FMA(param_4[5].data, weight_decay4.data, param_4[5].data);
param_4[6].data = SIMD_FMA(param_4[6].data, weight_decay4.data, param_4[6].data);
param_4[7].data = SIMD_FMA(param_4[7].data, weight_decay4.data, param_4[7].data);
}
param_4[0].data = SIMD_FMA(grad_4[0].data, step_size_4.data, param_4[0].data);
param_4[1].data = SIMD_FMA(grad_4[1].data, step_size_4.data, param_4[1].data);
param_4[2].data = SIMD_FMA(grad_4[2].data, step_size_4.data, param_4[2].data);
param_4[3].data = SIMD_FMA(grad_4[3].data, step_size_4.data, param_4[3].data);
param_4[4].data = SIMD_FMA(grad_4[4].data, step_size_4.data, param_4[4].data);
param_4[5].data = SIMD_FMA(grad_4[5].data, step_size_4.data, param_4[5].data);
param_4[6].data = SIMD_FMA(grad_4[6].data, step_size_4.data, param_4[6].data);
param_4[7].data = SIMD_FMA(grad_4[7].data, step_size_4.data, param_4[7].data);
SIMD_STORE(_params + i, param_4[0].data);
SIMD_STORE(_params + i + SIMD_WIDTH, param_4[1].data);
SIMD_STORE(_params + i + (SIMD_WIDTH << 1), param_4[2].data);
SIMD_STORE(_params + i + SIMD_WIDTH * 3, param_4[3].data);
SIMD_STORE(_params + i + (SIMD_WIDTH << 2), param_4[4].data);
SIMD_STORE(_params + i + SIMD_WIDTH * 5, param_4[5].data);
SIMD_STORE(_params + i + SIMD_WIDTH * 6, param_4[6].data);
SIMD_STORE(_params + i + SIMD_WIDTH * 7, param_4[7].data);
if (dev_params) {
SIMD_STORE(_doubled_buffer[_buf_index] + (i - t), param_4[0].data);
SIMD_STORE(_doubled_buffer[_buf_index] + (i - t) + SIMD_WIDTH, param_4[1].data);
SIMD_STORE(_doubled_buffer[_buf_index] + (i - t) + (SIMD_WIDTH << 1),
param_4[2].data);
SIMD_STORE(_doubled_buffer[_buf_index] + (i - t) + SIMD_WIDTH * 3, param_4[3].data);
SIMD_STORE(_doubled_buffer[_buf_index] + (i - t) + (SIMD_WIDTH << 2),
param_4[4].data);
SIMD_STORE(_doubled_buffer[_buf_index] + (i - t) + SIMD_WIDTH * 5, param_4[5].data);
SIMD_STORE(_doubled_buffer[_buf_index] + (i - t) + SIMD_WIDTH * 6, param_4[6].data);
SIMD_STORE(_doubled_buffer[_buf_index] + (i - t) + SIMD_WIDTH * 7, param_4[7].data);
}
SIMD_STORE(_exp_avg + i, momentum_4[0].data);
SIMD_STORE(_exp_avg + i + SIMD_WIDTH, momentum_4[1].data);
SIMD_STORE(_exp_avg + i + (SIMD_WIDTH << 1), momentum_4[2].data);
SIMD_STORE(_exp_avg + i + SIMD_WIDTH * 3, momentum_4[3].data);
SIMD_STORE(_exp_avg + i + (SIMD_WIDTH << 2), momentum_4[4].data);
SIMD_STORE(_exp_avg + i + SIMD_WIDTH * 5, momentum_4[5].data);
SIMD_STORE(_exp_avg + i + SIMD_WIDTH * 6, momentum_4[6].data);
SIMD_STORE(_exp_avg + i + SIMD_WIDTH * 7, momentum_4[7].data);
SIMD_STORE(_exp_avg_sq + i, variance_4[0].data);
SIMD_STORE(_exp_avg_sq + i + SIMD_WIDTH, variance_4[1].data);
SIMD_STORE(_exp_avg_sq + i + (SIMD_WIDTH << 1), variance_4[2].data);
SIMD_STORE(_exp_avg_sq + i + SIMD_WIDTH * 3, variance_4[3].data);
SIMD_STORE(_exp_avg_sq + i + (SIMD_WIDTH << 2), variance_4[4].data);
SIMD_STORE(_exp_avg_sq + i + SIMD_WIDTH * 5, variance_4[5].data);
SIMD_STORE(_exp_avg_sq + i + SIMD_WIDTH * 6, variance_4[6].data);
SIMD_STORE(_exp_avg_sq + i + SIMD_WIDTH * 7, variance_4[7].data);
}
if (dev_params) {
launch_param_update(
_doubled_buffer[_buf_index], dev_params + t, copy_size, _streams[_buf_index]);
_buf_index = !_buf_index;
}
}
#endif
if (_param_size > rounded_size)
Step_4((_params + rounded_size),
(grads + rounded_size),
(_exp_avg + rounded_size),
(_exp_avg_sq + rounded_size),
(_param_size - rounded_size),
(dev_params != nullptr ? (dev_params + rounded_size) : dev_params));
}
int ds_adam_step(int optimizer_id,
size_t step,
float lr,
float beta1,
float beta2,
float epsilon,
float weight_decay,
bool bias_correction,
torch::Tensor& params,
torch::Tensor& grads,
torch::Tensor& exp_avg,
torch::Tensor& exp_avg_sq)
{
auto params_c = params.contiguous();
auto grads_c = grads.contiguous();
auto exp_avg_c = exp_avg.contiguous();
auto exp_avg_sq_c = exp_avg_sq.contiguous();
float* params_ptr = (float*)params_c.data_ptr();
float* grads_ptr = (float*)grads_c.data_ptr();
float* exp_avg_ptr = (float*)exp_avg_c.data_ptr();
float* exp_avg_sq_ptr = (float*)exp_avg_sq_c.data_ptr();
std::shared_ptr<Adam_Optimizer> opt =
std::static_pointer_cast<Adam_Optimizer>(s_optimizers[optimizer_id]);
opt->IncrementStep(step, beta1, beta2);
opt->update_state(lr, epsilon, weight_decay, bias_correction);
opt->Step_8(params_ptr, grads_ptr, exp_avg_ptr, exp_avg_sq_ptr, params_c.size(0));
opt->SynchronizeStreams();
return 0;
}
int ds_adam_step_plus_copy(int optimizer_id,
size_t step,
float lr,
float beta1,
float beta2,
float epsilon,
float weight_decay,
bool bias_correction,
torch::Tensor& params,
torch::Tensor& grads,
torch::Tensor& exp_avg,
torch::Tensor& exp_avg_sq,
torch::Tensor& gpu_params)
{
auto params_c = params.contiguous();
auto gpu_params_c = gpu_params.contiguous();
auto exp_avg_c = exp_avg.contiguous();
auto exp_avg_sq_c = exp_avg_sq.contiguous();
auto grads_c = grads.contiguous();
float* params_ptr = (float*)params_c.data_ptr();
float* grads_ptr = (float*)grads_c.data_ptr();
__half* gpu_params_ptr = (__half*)gpu_params_c.data_ptr();
float* exp_avg_ptr = (float*)exp_avg_c.data_ptr();
float* exp_avg_sq_ptr = (float*)exp_avg_sq_c.data_ptr();
std::shared_ptr<Adam_Optimizer> opt =
std::static_pointer_cast<Adam_Optimizer>(s_optimizers[optimizer_id]);
opt->IncrementStep(step, beta1, beta2);
opt->update_state(lr, epsilon, weight_decay, bias_correction);
opt->Step_8(
params_ptr, grads_ptr, exp_avg_ptr, exp_avg_sq_ptr, params_c.size(0), gpu_params_ptr);
opt->SynchronizeStreams();
return 0;
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
{
m.def("adam_update", &ds_adam_step, "DeepSpeed CPU Adam update (C++)");
m.def("adam_update_copy",
&ds_adam_step_plus_copy,
"DeepSpeed CPU Adam update and param copy (C++)");
m.def("create_adam", &create_adam_optimizer, "DeepSpeed CPU Adam (C++)");
}
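/* Usage sketch of the bindings above from Python once the extension is built.
 * The module name ds_opt_adam and the tensors are assumptions for illustration,
 * not something defined in this commit:
 *
 *   import torch, ds_opt_adam
 *   ds_opt_adam.create_adam(0, 1e-3, 0.9, 0.999, 1e-8, 0.01, True)   # adamw_mode=True
 *   # fp32 params/grads and the Adam state all live in CPU memory
 *   ds_opt_adam.adam_update(0, step, 1e-3, 0.9, 0.999, 1e-8, 0.01, True,
 *                           params, grads, exp_avg, exp_avg_sq)
 */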
#include "hip/hip_runtime.h"
#include "hip/custom_hip_layers.h"
__global__ void param_update_kernel(const float* input, __half* output, int size)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < size) { output[id] = (__half)input[id]; }
}
void launch_param_update(const float* input, __half* output, int size, hipStream_t stream)
{
int threads = 1024;
dim3 grid_dim((size - 1) / threads + 1);
dim3 block_dim(threads);
    hipLaunchKernelGGL(param_update_kernel, dim3(grid_dim), dim3(block_dim), 0, stream, input, output, size);
}
#include <torch/extension.h>
void multi_tensor_adam_cuda(int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
const float lr,
const float beta1,
const float beta2,
const float epsilon,
const int step,
const int mode,
const int bias_correction,
const float weight_decay);
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
{
m.def("multi_tensor_adam",
&multi_tensor_adam_cuda,
"Compute and apply gradient update to parameters for Adam optimizer");
}
/* Copyright 2020 The Microsoft DeepSpeed Team
Copyright NVIDIA/apex
This file is adapted from fused adam in NVIDIA/apex, commit a109f85
*/
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/Exceptions.h>
// Another possibility:
// #include <torch/all.h>
#include <assert.h>
#include "multi_tensor_apply.cuh"
#include "type_shim.h"
#define BLOCK_SIZE 512
#define ILP 4
typedef enum {
ADAM_MODE_0 = 0, // L2 regularization mode
    ADAM_MODE_1 = 1  // Decoupled weight decay mode (AdamW)
} adamMode_t;
using MATH_T = float;
template <typename T>
struct AdamFunctor {
__device__ __forceinline__ void operator()(int chunk_size,
volatile int* noop_gmem,
TensorListMetadata<4>& tl,
const float beta1,
const float beta2,
const float beta1_correction,
const float beta2_correction,
const float epsilon,
const float lr,
adamMode_t mode,
const float decay)
{
// I'd like this kernel to propagate infs/nans.
// if(*noop_gmem == 1)
// return;
int tensor_loc = tl.block_to_tensor[blockIdx.x];
// potentially use to pass in list of scalar
// int tensor_num = tl.start_tensor_this_launch + tensor_loc;
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
T* g = (T*)tl.addresses[0][tensor_loc];
g += chunk_idx * chunk_size;
T* p = (T*)tl.addresses[1][tensor_loc];
p += chunk_idx * chunk_size;
T* m = (T*)tl.addresses[2][tensor_loc];
m += chunk_idx * chunk_size;
T* v = (T*)tl.addresses[3][tensor_loc];
v += chunk_idx * chunk_size;
n -= chunk_idx * chunk_size;
// see note in multi_tensor_scale_kernel.cu
for (int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * ILP) {
MATH_T r_g[ILP];
MATH_T r_p[ILP];
MATH_T r_m[ILP];
MATH_T r_v[ILP];
#pragma unroll
for (int ii = 0; ii < ILP; ii++) {
int i = i_start + threadIdx.x + ii * blockDim.x;
if (i < n && i < chunk_size) {
r_g[ii] = g[i];
r_p[ii] = p[i];
r_m[ii] = m[i];
r_v[ii] = v[i];
} else {
r_g[ii] = MATH_T(0);
r_p[ii] = MATH_T(0);
r_m[ii] = MATH_T(0);
r_v[ii] = MATH_T(0);
}
}
#pragma unroll
for (int ii = 0; ii < ILP; ii++) {
if (mode == ADAM_MODE_0) { // L2
r_g[ii] = r_g[ii] + (decay * r_p[ii]);
r_m[ii] = beta1 * r_m[ii] + (1 - beta1) * r_g[ii];
r_v[ii] = beta2 * r_v[ii] + (1 - beta2) * r_g[ii] * r_g[ii];
MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
MATH_T update = next_m_unbiased / denom;
r_p[ii] = r_p[ii] - (lr * update);
} else { // weight decay
r_m[ii] = beta1 * r_m[ii] + (1 - beta1) * r_g[ii];
r_v[ii] = beta2 * r_v[ii] + (1 - beta2) * r_g[ii] * r_g[ii];
MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
MATH_T update = (next_m_unbiased / denom) + (decay * r_p[ii]);
r_p[ii] = r_p[ii] - (lr * update);
}
}
#pragma unroll
for (int ii = 0; ii < ILP; ii++) {
int i = i_start + threadIdx.x + ii * blockDim.x;
if (i < n && i < chunk_size) {
p[i] = r_p[ii];
m[i] = r_m[ii];
v[i] = r_v[ii];
}
}
}
}
};
void multi_tensor_adam_cuda(int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
const float lr,
const float beta1,
const float beta2,
const float epsilon,
const int step,
const int mode,
const int bias_correction,
const float weight_decay)
{
using namespace at;
// Handle bias correction mode
float bias_correction1 = 1.0f, bias_correction2 = 1.0f;
if (bias_correction == 1) {
bias_correction1 = 1 - ::pow(beta1, step);
bias_correction2 = 1 - ::pow(beta2, step);
}
// Assume single type across p,g,m1,m2 now
DISPATCH_DOUBLE_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(),
0,
"adam",
multi_tensor_apply<4>(BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
AdamFunctor<scalar_t_0>(),
beta1,
beta2,
bias_correction1,
bias_correction2,
epsilon,
lr,
(adamMode_t)mode,
weight_decay);)
AT_CUDA_CHECK(hipGetLastError());
}
#include "hip/hip_runtime.h"
/* Copyright 2020 The Microsoft DeepSpeed Team
Copyright NVIDIA/apex
This file is adapted from fused adam in NVIDIA/apex, commit a109f85
*/
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/Exceptions.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include "compat.h"
#include <assert.h>
// #include <iostream>
// This header is the one-stop shop for all your multi-tensor apply needs.
// TODO: Kernel arg size limit may be <4KB for some other cards (ie Jetson)
constexpr int depth_to_max_tensors[5] = {110, 64, 48, 36, 30};
constexpr int depth_to_max_blocks[5] = {320, 320, 320, 320, 320};
template <int n>
struct TensorListMetadata {
void* addresses[n][depth_to_max_tensors[n - 1]];
int sizes[depth_to_max_tensors[n - 1]];
unsigned char block_to_tensor[depth_to_max_blocks[n - 1]];
int block_to_chunk[depth_to_max_blocks[n - 1]]; // I fear this needs to be a full int.
int start_tensor_this_launch;
};
template <typename T, typename U, typename... ArgTypes>
__global__ void multi_tensor_apply_kernel(int chunk_size,
volatile int* noop_flag,
T tl,
U callable,
ArgTypes... args)
{
// Hand the chunk information to the user-supplied functor to process however it likes.
callable(chunk_size, noop_flag, tl, args...);
}
template <int depth, typename T, typename... ArgTypes>
void multi_tensor_apply(int block_size,
int chunk_size,
const at::Tensor& noop_flag,
const std::vector<std::vector<at::Tensor>>& tensor_lists,
T callable,
ArgTypes... args)
{
TORCH_CHECK(tensor_lists.size() == depth, "tensor_lists.size() != depth");
int len0 = tensor_lists[0].size();
TORCH_CHECK(len0 > 0, "tensor_lists[0].size() is not > 0");
auto ref_device = tensor_lists[0][0].device();
TORCH_CHECK(ref_device.type() == at::kCUDA, "expected input to be on cuda");
for (int l = 0; l < tensor_lists.size(); l++) // No range-based for because I need indices
{
TORCH_CHECK(tensor_lists[l].size() == len0, "Size mismatch among tensor lists");
for (int t = 0; t < tensor_lists[l].size(); t++) {
// TODO: Print which tensor fails.
bool contiguous_memory = tensor_lists[l][t].is_contiguous();
#ifdef VERSION_GE_1_5
contiguous_memory = (contiguous_memory ||
tensor_lists[l][t].is_contiguous(at::MemoryFormat::ChannelsLast));
#endif
TORCH_CHECK(contiguous_memory, "A tensor was not contiguous.");
TORCH_CHECK(tensor_lists[l][t].device() == ref_device,
"A tensor was not on the same device as the first tensor");
TORCH_CHECK(tensor_lists[l][t].numel() == tensor_lists[0][t].numel(), "Size mismatch");
}
}
int ntensors = tensor_lists[0].size();
TensorListMetadata<depth> tl;
const at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard(device_of(tensor_lists[0][0]));
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
tl.start_tensor_this_launch = 0;
int loc_block_info = 0;
int loc_tensor_info = 0;
for (int t = 0; t < ntensors; t++) {
tl.sizes[loc_tensor_info] = tensor_lists[0][t].numel();
for (int d = 0; d < depth; d++)
tl.addresses[d][loc_tensor_info] = tensor_lists[d][t].data_ptr();
loc_tensor_info++;
int chunks_this_tensor = (tensor_lists[0][t].numel() + chunk_size - 1) / chunk_size;
for (int chunk = 0; chunk < chunks_this_tensor; chunk++) {
// std::cout << chunks_this_tensor << std::endl;
tl.block_to_tensor[loc_block_info] = loc_tensor_info - 1;
tl.block_to_chunk[loc_block_info] = chunk;
loc_block_info++;
bool tensors_full = (loc_tensor_info == depth_to_max_tensors[depth - 1] &&
chunk == chunks_this_tensor - 1);
bool blocks_full = (loc_block_info == depth_to_max_blocks[depth - 1]);
bool last_chunk = (t == ntensors - 1 && chunk == chunks_this_tensor - 1);
if (tensors_full || blocks_full || last_chunk) {
// using accscalar_t = acc_type<scalar_t, true>;
                hipLaunchKernelGGL(multi_tensor_apply_kernel,
                                   dim3(loc_block_info), dim3(block_size), 0, stream,
                                   chunk_size, noop_flag.DATA_PTR<int>(), tl, callable, args...);
AT_CUDA_CHECK(hipGetLastError());
// Reset. The control flow possibilities here make my brain hurt.
loc_block_info = 0;
if (chunk == chunks_this_tensor - 1) {
// std::cout << "Hit case 1 " << cond1 << " " << cond2 << " " << cond3 <<
// std::endl;
loc_tensor_info = 0;
tl.start_tensor_this_launch = t + 1;
} else {
// std::cout << "Hit case 2 " << cond1 << " " << cond2 << " " << cond3 <<
// std::endl;
tl.sizes[0] = tl.sizes[loc_tensor_info - 1];
for (int d = 0; d < depth; d++)
tl.addresses[d][0] = tl.addresses[d][loc_tensor_info - 1];
loc_tensor_info = 1;
tl.start_tensor_this_launch = t;
}
}
}
}
}
#pragma once
#ifdef _WIN32
#include <windows.h>
#else
#include <time.h>
#endif
#ifdef _WIN32
class Stopwatch {
private:
double m_total_time;
LARGE_INTEGER m_start_time;
public:
Stopwatch() { m_total_time = 0.0; }
~Stopwatch() {}
void Reset() { m_total_time = 0.0; }
void Start() { QueryPerformanceCounter(&m_start_time); }
void Restart()
{
m_total_time = 0.0;
QueryPerformanceCounter(&m_start_time);
}
void Stop()
{
LARGE_INTEGER frequency;
LARGE_INTEGER stop_time;
QueryPerformanceFrequency(&frequency);
QueryPerformanceCounter(&stop_time);
m_total_time +=
((double)(stop_time.QuadPart - m_start_time.QuadPart) / (double)frequency.QuadPart);
}
double GetTimeInSeconds() { return m_total_time; }
};
#else
class Stopwatch {
private:
double m_total_time;
struct timespec m_start_time;
bool m_is_started;
public:
Stopwatch()
{
m_total_time = 0.0;
m_is_started = false;
}
~Stopwatch() {}
void Reset() { m_total_time = 0.0; }
void Start()
{
clock_gettime(CLOCK_MONOTONIC, &m_start_time);
m_is_started = true;
}
void Restart()
{
m_total_time = 0.0;
clock_gettime(CLOCK_MONOTONIC, &m_start_time);
m_is_started = true;
}
void Stop()
{
if (m_is_started) {
m_is_started = false;
struct timespec end_time;
clock_gettime(CLOCK_MONOTONIC, &end_time);
m_total_time += (double)(end_time.tv_sec - m_start_time.tv_sec) +
(double)(end_time.tv_nsec - m_start_time.tv_nsec) / 1e9;
}
}
double GetTimeInSeconds()
{
if (m_is_started) {
Stop();
Start();
}
return m_total_time;
}
};
#endif
#ifndef __TIMER_H__
#define __TIMER_H__
#include <hip/hip_runtime.h>
#include <chrono>
#include "hip/hip_runtime.h"
class GPUTimer {
hipEvent_t start, stop;
public:
GPUTimer()
{
hipEventCreate(&start);
hipEventCreate(&stop);
}
~GPUTimer()
{
hipEventDestroy(start);
hipEventDestroy(stop);
}
inline void Record() { hipEventRecord(start); }
inline void Elapsed(float& time_elapsed)
{
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&time_elapsed, start, stop);
}
};
class CPUTimer {
std::chrono::high_resolution_clock::time_point start;
public:
CPUTimer() : start(std::chrono::high_resolution_clock::now()) {}
inline void Reset() { start = std::chrono::high_resolution_clock::now(); }
inline float Elapsed()
{
auto temp = start;
start = std::chrono::high_resolution_clock::now();
return (float)(std::chrono::duration_cast<std::chrono::microseconds>(start - temp).count() /
1e3);
}
};
#endif
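/* Illustrative usage of the timers above (not part of this commit):
 *
 *   GPUTimer gpu_timer;
 *   float gpu_ms = 0.f;
 *   gpu_timer.Record();            // records the start event
 *   // ... launch kernels ...
 *   gpu_timer.Elapsed(gpu_ms);     // records/synchronizes the stop event, elapsed time in ms
 *
 *   CPUTimer cpu_timer;            // starts timing at construction
 *   float cpu_ms = cpu_timer.Elapsed();   // milliseconds since start; also resets the timer
 */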
#pragma once
#include <ATen/hip/HIPContext.h>
#include <hip/hip_runtime_api.h>
#include <cassert>
#include <iostream>
#include <vector>
#include "rocblas.h"
#include "hip/hip_runtime.h"
#include "hiprand.h"
#include "gemm_test.h"
#define WARP_SIZE 32
#define CUDA_CHECK(callstr) \
{ \
hipError_t error_code = callstr; \
if (error_code != hipSuccess) { \
std::cerr << "CUDA error " << error_code << " at " << __FILE__ << ":" << __LINE__; \
assert(0); \
} \
}
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)
#define CUDA_2D_KERNEL_LOOP(i, n, j, m) \
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x) \
for (size_t j = blockIdx.y * blockDim.y + threadIdx.y; j < (m); j += blockDim.y * gridDim.y)
#define DS_CUDA_NUM_THREADS 512
#define DS_MAXIMUM_NUM_BLOCKS 262144
inline int DS_GET_BLOCKS(const int N)
{
return (std::max)(
(std::min)((N + DS_CUDA_NUM_THREADS - 1) / DS_CUDA_NUM_THREADS, DS_MAXIMUM_NUM_BLOCKS),
// Use at least 1 block, since CUDA does not allow empty block
1);
}
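// Worked example: for N = 1,000,000 elements DS_GET_BLOCKS returns
// ceil(1e6 / 512) = 1954 blocks, clamped to the range [1, DS_MAXIMUM_NUM_BLOCKS].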
class Context {
public:
Context() : _workspace(nullptr), _seed(42), _curr_offset(0)
{
hiprandCreateGenerator(&_gen, HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandSetPseudoRandomGeneratorSeed(_gen, 123);
if (rocblas_create_handle(&_cublasHandle) != rocblas_status_success) {
            auto message = std::string("Failed to create cublas handle.");
std::cerr << message << std::endl;
throw std::runtime_error(message);
}
}
virtual ~Context()
{
rocblas_destroy_handle(_cublasHandle);
hipFree(_workspace);
}
static Context& Instance()
{
static Context _ctx;
return _ctx;
}
void SetWorkSpace(void* workspace)
{
if (!workspace) { throw std::runtime_error("Workspace is null."); }
_workspace = workspace;
}
void* GetWorkSpace() { return _workspace; }
hiprandGenerator_t& GetRandGenerator() { return _gen; }
hipStream_t GetCurrentStream()
{
// get current pytorch stream.
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
return stream;
}
hipStream_t GetNewStream() { return at::hip::getStreamFromPoolMasqueradingAsCUDA(); }
rocblas_handle GetCublasHandle() { return _cublasHandle; }
std::pair<uint64_t, uint64_t> IncrementOffset(uint64_t offset_inc)
{
uint64_t offset = _curr_offset;
_curr_offset += offset_inc;
return std::pair<uint64_t, uint64_t>(_seed, offset);
}
void SetSeed(uint64_t new_seed) { _seed = new_seed; }
void TestGemmFP16(bool test_gemm, int batch_size, int seq_len, int head_num, int size_per_head)
{
// avoid rerun.
if (_gemm_algos.size() > 0) return;
if (test_gemm) {
rocblas_handle handle = GetCublasHandle();
std::unique_ptr<GemmTest<__half>> test_qkv_fw(
new GemmTest<__half>(batch_size * seq_len, // M
head_num * size_per_head, // N
head_num * size_per_head, // K
rocblas_operation_transpose,
rocblas_operation_none,
handle));
std::unique_ptr<GemmTest<__half>> test_inter(
new GemmTest<__half>(batch_size * seq_len, // M
4 * head_num * size_per_head, // N
head_num * size_per_head, // K
rocblas_operation_transpose,
rocblas_operation_none,
handle));
std::unique_ptr<GemmTest<__half>> test_output(
new GemmTest<__half>(batch_size * seq_len, // M
head_num * size_per_head, // N
4 * head_num * size_per_head, // K
rocblas_operation_transpose,
rocblas_operation_none,
handle));
std::unique_ptr<StridedGemmTest<__half>> test_attn_scores(
new StridedGemmTest<__half>(batch_size * head_num, // batch
seq_len, // M
seq_len, // N
size_per_head, // K
rocblas_operation_transpose,
rocblas_operation_none,
handle));
std::unique_ptr<StridedGemmTest<__half>> test_attn_context(
new StridedGemmTest<__half>(batch_size * head_num, // batch
size_per_head, // M
seq_len, // N
seq_len, // K
rocblas_operation_none,
rocblas_operation_none,
handle));
_gemm_algos.push_back(test_qkv_fw->TestAlgo(100));
_gemm_algos.push_back(test_inter->TestAlgo(100));
_gemm_algos.push_back(test_output->TestAlgo(100));
_gemm_algos.push_back(test_attn_scores->TestAlgo(100));
_gemm_algos.push_back(test_attn_context->TestAlgo(100));
} else {
// Use default algo.
_gemm_algos.push_back(std::array<int, 3>({99, 99, 99}));
_gemm_algos.push_back(std::array<int, 3>({99, 99, 99}));
_gemm_algos.push_back(std::array<int, 3>({99, 99, 99}));
_gemm_algos.push_back(std::array<int, 3>({99, 99, 99}));
_gemm_algos.push_back(std::array<int, 3>({99, 99, 99}));
}
}
const std::vector<std::array<int, 3>>& GetGemmAlgos() const { return _gemm_algos; }
private:
hiprandGenerator_t _gen;
rocblas_handle _cublasHandle;
void* _workspace;
uint64_t _seed;
uint64_t _curr_offset;
std::vector<std::array<int, 3>> _gemm_algos;
};
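/* Minimal usage sketch: the Context singleton hands out the shared rocBLAS handle,
 * HIP streams, and RNG state used by the custom ops, e.g.
 *
 *   hipStream_t stream     = Context::Instance().GetCurrentStream();
 *   rocblas_handle rocblas = Context::Instance().GetCublasHandle();
 *   auto seed_and_offset   = Context::Instance().IncrementOffset(16);
 *
 * Treating the seed/offset pair as a per-launch dropout RNG state is an assumption
 * based on this API, not something shown in this header.
 */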
#pragma once
#include <cpuid.h>
#include <hip/hip_fp16.h>
#include <hip/hip_runtime_api.h>
#include <stdio.h>
#include <x86intrin.h>
#include <cassert>
#include "context.h"
#include "rocblas.h"
#include "hip/hip_runtime.h"
#include "hiprand.h"
#define CUDA_CHECK(callstr) \
{ \
hipError_t error_code = callstr; \
if (error_code != hipSuccess) { \
std::cerr << "CUDA error " << error_code << " at " << __FILE__ << ":" << __LINE__; \
assert(0); \
} \
}
#define TILE (1024 * 1024 * 1024)
#if defined(__AVX512__)
#define SIMD_STORE(a, d) _mm512_storeu_ps(a, d)
#define SIMD_LOAD(x) _mm512_loadu_ps(x)
#define SIMD_SET(x) _mm512_set1_ps(x)
#define SIMD_MUL(x, y) _mm512_mul_ps(x, y)
#define SIMD_FMA(x, y, c) _mm512_fmadd_ps(x, y, c)
#define SIMD_SQRT(x) _mm512_sqrt_ps(x)
#define SIMD_DIV(x, y) _mm512_div_ps(x, y)
#define SIMD_WIDTH 16
#else
#if defined(__AVX256__)
#define SIMD_STORE(a, d) _mm256_storeu_ps(a, d)
#define SIMD_LOAD(x) _mm256_loadu_ps(x)
#define SIMD_SET(x) _mm256_set1_ps(x)
#define SIMD_MUL(x, y) _mm256_mul_ps(x, y)
#define SIMD_FMA(x, y, c) _mm256_fmadd_ps(x, y, c)
#define SIMD_SQRT(x) _mm256_sqrt_ps(x)
#define SIMD_DIV(x, y) _mm256_div_ps(x, y)
#define SIMD_WIDTH 8
#endif
#endif
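// Hedged illustration (not part of the original header): how the SIMD_* macros above compose.
// This sketch performs out[i] += scale * grad[i] one SIMD_WIDTH-wide vector at a time; the
// function name and the assumption that `size` is a multiple of SIMD_WIDTH are ours.
#if defined(__AVX512__) or defined(__AVX256__)
inline void example_simd_axpy(float* out, const float* grad, float scale, size_t size)
{
    auto scale_v = SIMD_SET(scale);  // broadcast the scalar into a full vector register
    for (size_t i = 0; i + SIMD_WIDTH <= size; i += SIMD_WIDTH) {
        auto g = SIMD_LOAD(grad + i);                  // load SIMD_WIDTH gradients
        auto o = SIMD_LOAD(out + i);                   // load SIMD_WIDTH parameters
        SIMD_STORE(out + i, SIMD_FMA(g, scale_v, o));  // out = grad * scale + out
    }
    // A scalar tail loop would handle the remaining size % SIMD_WIDTH elements.
}
#endif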
class Adam_Optimizer {
public:
Adam_Optimizer(float alpha = 1e-3,
float betta1 = 0.9,
float betta2 = 0.999,
float eps = 1e-8,
float weight_decay = 0,
bool adamw_mode = true)
: _alpha(alpha),
_betta1(betta1),
_betta2(betta2),
_eps(eps),
_weight_decay(weight_decay),
_betta1_t(1.0),
_betta2_t(1.0),
_step(0),
_buf_index(false),
_adamw_mode(adamw_mode)
{
hipHostMalloc((void**)_doubled_buffer, TILE * sizeof(float));
hipHostMalloc((void**)(_doubled_buffer + 1), TILE * sizeof(float));
_streams[0] = Context::Instance().GetCurrentStream();
_streams[1] = Context::Instance().GetNewStream();
}
~Adam_Optimizer()
{
hipHostFree(_doubled_buffer[0]);
hipHostFree(_doubled_buffer[1]);
}
void Step(float* _params,
float* grads,
float* _exp_avg,
float* _exp_avg_sq,
size_t param_size,
__half* dev_param = nullptr);
void Step_4(float* _params,
float* grads,
float* _exp_avg,
float* _exp_avg_sq,
size_t param_size,
__half* dev_param = nullptr);
void Step_8(float* _params,
float* grads,
float* _exp_avg,
float* _exp_avg_sq,
size_t _param_size,
__half* dev_params = nullptr);
inline void SynchronizeStreams()
{
for (int i = 0; i < 2; i++) hipStreamSynchronize(_streams[i]);
}
inline void IncrementStep(size_t step, float beta1, float beta2)
{
if (beta1 != _betta1 || beta2 != _betta2) {
_step = step;
_betta1 = beta1;
_betta2 = beta2;
_betta1_t = std::pow(_betta1, step);
_betta2_t = std::pow(_betta2, step);
} else {
_step++;
if (_step != step) {
_betta1_t = std::pow(_betta1, step);
_betta2_t = std::pow(_betta2, step);
_step = step;
} else {
_betta1_t *= _betta1;
_betta2_t *= _betta2;
}
}
}
inline void update_state(float lr, float epsilon, float weight_decay, bool bias_correction)
{
_alpha = lr;
_eps = epsilon;
_weight_decay = weight_decay;
_bias_correction1 = 1.0f;
_bias_correction2 = 1.0f;
if (bias_correction) {
_bias_correction1 = 1 - _betta1_t;
_bias_correction2 = 1 / sqrt(1 - _betta2_t);
}
}
private:
#if defined(__AVX512__) or defined(__AVX256__)
union AVX_Data {
#if defined(__AVX512__)
__m512 data;
#else
__m256 data;
#endif
// float data_f[16];
};
#endif
float _alpha;
float _betta1;
float _betta2;
float _eps;
float _weight_decay;
float _betta1_t;
float _betta2_t;
size_t _step;
float _bias_correction1;
float _bias_correction2;
float* _doubled_buffer[2];
bool _buf_index;
bool _adamw_mode;
hipStream_t _streams[2];
};
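// Hedged usage sketch (not part of the original header): the call order a binding layer might
// use to drive one optimizer step. Pointers and hyperparameters are placeholders; Step_8 is
// implemented in the accompanying translation unit.
inline void example_adam_step(Adam_Optimizer& opt,
                              float* params,
                              float* grads,
                              float* exp_avg,
                              float* exp_avg_sq,
                              size_t param_size,
                              size_t step,
                              float lr)
{
    opt.IncrementStep(step, /*beta1=*/0.9f, /*beta2=*/0.999f);  // refresh betta1_t / betta2_t
    opt.update_state(lr, /*epsilon=*/1e-8f, /*weight_decay=*/0.0f, /*bias_correction=*/true);
    opt.Step_8(params, grads, exp_avg, exp_avg_sq, param_size);  // vectorized parameter update
    opt.SynchronizeStreams();  // wait for any asynchronous device copies issued by Step_8
}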
#pragma once
#include <assert.h>
#include <rocblas.h>
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <hip/hip_runtime.h>
#ifndef __HIP_PLATFORM_HCC__
#include <mma.h>
#endif
#include <stdio.h>
int cublas_gemm_ex(rocblas_handle handle,
rocblas_operation transa,
rocblas_operation transb,
int m,
int n,
int k,
const float* alpha,
const float* beta,
const float* A,
const float* B,
float* C,
#ifdef __HIP_PLATFORM_HCC__
rocblas_gemm_algo algo = rocblas_gemm_algo_standard);
#else
cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT);
#endif
int cublas_gemm_ex(rocblas_handle handle,
rocblas_operation transa,
rocblas_operation transb,
int m,
int n,
int k,
const float* alpha,
const float* beta,
const __half* A,
const __half* B,
__half* C,
#ifdef __HIP_PLATFORM_HCC__
rocblas_gemm_algo algo = rocblas_gemm_algo_standard);
#else
cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT_TENSOR_OP);
#endif
int cublas_strided_batched_gemm(rocblas_handle handle,
int m,
int n,
int k,
const float* alpha,
const float* beta,
const float* A,
const float* B,
float* C,
rocblas_operation op_A,
rocblas_operation op_B,
int stride_A,
int stride_B,
int stride_C,
int batch,
#ifdef __HIP_PLATFORM_HCC__
rocblas_gemm_algo algo = rocblas_gemm_algo_standard);
#else
cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT);
#endif
int cublas_strided_batched_gemm(rocblas_handle handle,
int m,
int n,
int k,
const float* alpha,
const float* beta,
const __half* A,
const __half* B,
__half* C,
rocblas_operation op_A,
rocblas_operation op_B,
int stride_A,
int stride_B,
int stride_C,
int batch,
#ifdef __HIP_PLATFORM_HCC__
rocblas_gemm_algo algo = rocblas_gemm_algo_standard);
#else
cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT_TENSOR_OP);
#endif
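// Hedged usage sketch (not part of the original header): one FP32 GEMM through the wrapper
// above, relying on the default algorithm argument. Operation flags follow the rocBLAS
// column-major convention; the exact layout expected for A, B, and C is defined by the
// wrapper's implementation file, so treat these arguments as illustrative.
inline int example_gemm_fp32(rocblas_handle handle,
                             const float* A,
                             const float* B,
                             float* C,
                             int m,
                             int n,
                             int k)
{
    const float alpha = 1.0f;
    const float beta = 0.0f;  // overwrite C rather than accumulate
    return cublas_gemm_ex(
        handle, rocblas_operation_transpose, rocblas_operation_none, m, n, k, &alpha, &beta, A, B, C);
}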
#pragma once
#include <assert.h>
#include <rocblas.h>
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <hip/hip_runtime.h>
#ifndef __HIP_PLATFORM_HCC__
#include <mma.h>
#endif
#include <stdio.h>
int cublas_gemm_ex(rocblas_handle handle,
rocblas_operation transa,
rocblas_operation transb,
int m,
int n,
int k,
const float* alpha,
const float* beta,
const float* A,
const float* B,
float* C,
rocblas_gemm_algo algo = rocblas_gemm_algo_standard);
int cublas_gemm_ex(rocblas_handle handle,
rocblas_operation transa,
rocblas_operation transb,
int m,
int n,
int k,
const float* alpha,
const float* beta,
const __half* A,
const __half* B,
__half* C,
rocblas_gemm_algo algo = rocblas_gemm_algo_standard);
int cublas_strided_batched_gemm(rocblas_handle handle,
int m,
int n,
int k,
const float* alpha,
const float* beta,
const float* A,
const float* B,
float* C,
rocblas_operation op_A,
rocblas_operation op_B,
int stride_A,
int stride_B,
int stride_C,
int batch,
rocblas_gemm_algo algo = rocblas_gemm_algo_standard);
int cublas_strided_batched_gemm(rocblas_handle handle,
int m,
int n,
int k,
const float* alpha,
const float* beta,
const __half* A,
const __half* B,
__half* C,
rocblas_operation op_A,
rocblas_operation op_B,
int stride_A,
int stride_B,
int stride_C,
int batch,
rocblas_gemm_algo algo = rocblas_gemm_algo_standard);
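// Hedged usage sketch (not part of the original header): a batched FP16 GEMM through the
// strided wrapper above, e.g. one GEMM per attention head. The strides shown assume densely
// packed m*k / k*n / m*n sub-matrices per batch entry and are illustrative only.
inline int example_strided_gemm_fp16(rocblas_handle handle,
                                     const __half* A,
                                     const __half* B,
                                     __half* C,
                                     int m,
                                     int n,
                                     int k,
                                     int batch)
{
    const float alpha = 1.0f;
    const float beta = 0.0f;
    return cublas_strided_batched_gemm(handle,
                                       m,
                                       n,
                                       k,
                                       &alpha,
                                       &beta,
                                       A,
                                       B,
                                       C,
                                       rocblas_operation_transpose,
                                       rocblas_operation_none,
                                       m * k,  // stride_A
                                       k * n,  // stride_B
                                       m * n,  // stride_C
                                       batch);
}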
#pragma once
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <stdio.h>
#include <stdlib.h>
#ifdef __HIP_PLATFORM_HCC__
#include <hip/hip_cooperative_groups.h>
#else
#include <cooperative_groups.h>
#endif
#define __HIP_DEVICE_COMPILE__ 1
#define __HIP_PLATFORM_HCC__
#include <hiprand_kernel.h>
#include "context.h"
#include "cublas_wrappers.h"
#define MAX_THREADS 1024
#define THREADS 256
#define MAX_THREAD_STRIDE 32
#define TILE_DIM 32
// Maximum sequence-length support is based on the number of threads (2048) allowed in each
// block; with MAX_THREAD_ITERATIONS = 8 this caps out at 8K. For higher sequence lengths a
// larger maximum is needed (e.g., 32 for 64K).
#define MAX_THREAD_ITERATIONS 8 // Maximum 8K
#define MAX_WARP_NUM 32
#define MAX_REGISTERS 256
// Fused bias add with gelu activation
template <typename T>
void launch_bias_gelu(const T* input,
const T* bias,
T* output,
int intermediate_size,
int batch_size,
hipStream_t stream);
template <typename T>
void launch_gelu(const T* input,
T* output,
int intermediate_size,
int batch_size,
hipStream_t stream);
template <typename T>
void launch_d_gelu(T* d_output,
const T* input,
const T* bias,
int intermediate_size,
int batch_size,
hipStream_t stream);
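// Hedged usage sketch (not part of the original header): launching the fused bias + GeLU
// kernel on the Context's current stream. Buffers are assumed to hold
// batch_size * intermediate_size elements on the device.
inline void example_bias_gelu_fp32(const float* input,
                                   const float* bias,
                                   float* output,
                                   int intermediate_size,
                                   int batch_size)
{
    hipStream_t stream = Context::Instance().GetCurrentStream();  // reuse PyTorch's stream
    launch_bias_gelu<float>(input, bias, output, intermediate_size, batch_size, stream);
}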
// Custom fused bias add with layer normalization
template <typename T>
void launch_bias_residual_layer_norm(T* vals,
const T* residual,
const T* gamma,
const T* beta,
float epsilon,
int batch_size,
int hidden_dim,
hipStream_t stream,
bool preLayerNorm,
bool training,
T* vars,
T* means);
template <typename T>
void launch_bias_residual_layer_norm(T* vals,
const T* residual,
const T* gamma,
const T* beta,
float epsilon,
int batch_size,
int hidden_dim,
hipStream_t stream,
bool preLayerNorm,
bool training,
T* vars);
template <typename T>
void launch_layerNorm_backward_fused_add(const T* out_grad1,
const T* out_grad2,
const T* X_data,
const T* vars,
const T* means,
const T* gamma,
T* gamma_grad,
T* betta_grad,
T* inp_grad,
int batch_size,
int hidden_dim,
hipStream_t stream[2]);
template <typename T>
void launch_layerNorm_backward_fused_add(const T* out_grad1,
const T* out_grad2,
const T* vals_hat,
const T* vars,
const T* gamma,
T* gamma_grad,
T* betta_grad,
T* inp_grad,
int batch_size,
int hidden_dim,
hipStream_t stream[2],
bool invertible = false,
const T* betta = nullptr);
template <typename T>
void launch_layerNorm_backward(const T* out_grad,
const T* X_data,
const T* vars,
const T* means,
const T* gamma,
T* gamma_grad,
T* betta_grad,
T* inp_grad,
int batch_size,
int hidden_dim,
hipStream_t stream[2]);
template <typename T>
void launch_layerNorm_backward(const T* out_grad,
const T* vals_hat,
const T* vars,
const T* gamma,
T* gamma_grad,
T* betta_grad,
T* inp_grad,
int batch_size,
int hidden_dim,
hipStream_t stream[2],
bool invertible = false,
const T* betta = nullptr);
template <typename T>
void launch_layerNorm_backward_nreversible(const T* out_grad,
const T* vals,
const T* out_grad_trans,
const T* vals_trans,
const T* means,
const T* vars,
const T* gamma,
T* gamma_grad,
T* betta_grad,
T* inp_grad,
int batch_size,
int hidden_dim,
hipStream_t stream[2]);
template <typename T>
void Transpose(const T* inp_mat, T* out_mat, int rows, int cols, hipStream_t stream);
template <typename T>
void launch_attn_softmax_backward(T* out_grad,
const T* soft_inp,
int batch_size,
int heads,
int seq_length,
hipStream_t stream);
template <typename T>
void launch_attn_softmax_backward_v2(T* out_grad,
const T* soft_inp,
int batch_size,
int heads,
int seq_length,
hipStream_t stream);
// Custom softmax with scaling and attention mask addition
template <typename T>
void launch_attn_softmax(T* vals,
const T* attn_mask,
int batch_size,
int heads,
int sequence_length,
hipStream_t stream);
template <typename T>
void launch_transform_0213(T* output,
const T* vals,
int batch_size,
int seq_length,
int hidden_dim,
int heads,
hipStream_t stream);
// Custom bias add
template <typename T>
void launch_bias_add_transform_0213(T* outputs,
const T* vals,
const T* bias,
int batch_size,
int seq_length,
int hidden_dim,
int heads,
hipStream_t stream,
int trans_count);
// 4D transform [0, 1, 2, 3] -> [0, 2, 1, 3]
template <typename T>
void launch_transform4d_0213(T* out,
const T* in,
int batch_size,
int heads,
int seq_length,
int hidden_dim,
hipStream_t stream,
int trans_count);
template <typename T>
void launch_dropout(T* vals,
const T* bias,
uint8_t* mask,
int batch,
int dim,
float ratio,
hipStream_t stream);
template <typename T>
void launch_dropout(T* vals_out,
const T* vals,
uint8_t* mask,
int total_count,
int dim,
float ratio,
hipStream_t stream,
bool bwd = false);
template <typename T>
void launch_dropout(T* out,
const T* vals,
const T* residual,
const T* bias,
uint8_t* mask,
int batch,
int dim,
float ratio,
hipStream_t stream);
template <typename T>
void launch_dropout_grad(T* vals, uint8_t* mask, int total_count, float ratio, hipStream_t stream);
template <typename T>
void launch_dropout_grad(T* vals_out,
const T* vals,
uint8_t* mask,
int total_count,
float ratio,
hipStream_t stream);
template <typename T>
void launch_fuse_transpose_bias_kernel(const T* inp,
T* out,
int rows,
int cols,
hipStream_t stream);
void launch_param_update(const float* input, __half* output, int size, hipStream_t stream);
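// Hedged usage sketch (not part of the original header): copying updated FP32 master
// parameters into an FP16 copy on a given stream, presumably the device-side counterpart of
// the dev_param argument taken by the CPU Adam Step functions. Both pointers are assumed to
// be device-resident and `size` is an element count.
inline void example_param_update(const float* fp32_params,
                                 __half* fp16_params,
                                 int size,
                                 hipStream_t stream)
{
    launch_param_update(fp32_params, fp16_params, size, stream);
}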
#pragma once
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <stdio.h>
#include <stdexcept>
template <typename T>
class Dropout {
public:
struct Config {
float ratio;
uint32_t dim;
bool training;
Config(float r, uint32_t d) : ratio(r), dim(d), training(true) {}
float RATIO() const { return training ? ratio : 0.0; }
inline void SetDim(uint32_t d) { dim = d; }
};
Dropout(const Config& config) : _config(config), _mask(nullptr) {}
virtual ~Dropout() {}
void Forward(int bsz, T* out, const T* vals, hipStream_t stream, bool bwd = false)
{
launch_dropout<T>(
out, vals, _mask, bsz * _config.dim, _config.dim, _config.RATIO(), stream, bwd);
}
void ForwardWithBias(int bsz, T* vals, const T* bias, hipStream_t stream)
{
launch_dropout<T>(vals, bias, _mask, bsz, _config.dim, _config.RATIO(), stream);
}
void ForwardWithBias(int bsz,
T* out,
const T* vals,
const T* residual,
const T* bias,
hipStream_t stream)
{
launch_dropout<T>(
out, vals, residual, bias, _mask, bsz, _config.dim, _config.RATIO(), stream);
}
void Backward(int bsz, T* d_vals, hipStream_t stream)
{
launch_dropout_grad<T>(d_vals, _mask, bsz * _config.dim, _config.RATIO(), stream);
}
void Backward(int bsz, T* d_vals_out, const T* d_vals, hipStream_t stream)
{
launch_dropout_grad<T>(
d_vals_out, d_vals, _mask, bsz * _config.dim, _config.RATIO(), stream);
}
bool HasDropout() const { return _config.RATIO() > 0.0; }
void SetTrainingMode(bool training) { _config.training = training; }
void SetMask(uint8_t* mask)
{
if (!mask) { throw std::runtime_error("Dropout mask is null."); }
_mask = mask;
}
Config GetConfig() const { return _config; }
inline void SetDimension(uint32_t dim) { _config.SetDim(dim); }
private:
uint8_t* _mask;
Config _config;
};
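// Hedged usage sketch (not part of the original header): wiring the Dropout wrapper to a
// caller-owned mask buffer and running forward/backward on a given stream. The buffer sizes
// (bsz * dim elements, and a uint8_t mask of the same count) are assumptions based on the
// launch_dropout / launch_dropout_grad calls above.
inline void example_dropout_fp32(float* out,
                                 const float* vals,
                                 float* d_vals,
                                 uint8_t* mask,
                                 int bsz,
                                 uint32_t dim,
                                 hipStream_t stream)
{
    Dropout<float>::Config config(0.1f, dim);  // 10% dropout over rows of width `dim`
    Dropout<float> dropout(config);
    dropout.SetMask(mask);                  // mask memory is owned by the caller
    dropout.Forward(bsz, out, vals, stream);
    dropout.Backward(bsz, d_vals, stream);  // masks gradients in place
}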