Unverified Commit 91b4a93d authored by Jeff Rasley's avatar Jeff Rasley Committed by GitHub

pytest skips for tests requiring certain ops (#411)

* add pytest skips around tests that require certain ops to be installed
parent 473ff985
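
The diff below applies two standard pytest idioms. For ops that an entire test file depends on, `pytest.skip(..., allow_module_level=True)` at import time skips the whole module; for individual tests, a `pytest.mark.skipif` marker is attached per function. A minimal self-contained sketch of both, keyed off the same `deepspeed.ops.__installed_ops__` dict the diff consults (the test body is illustrative):

```python
import pytest
import deepspeed

# Module-level: skip every test in this file when the op is missing.
# allow_module_level=True permits calling pytest.skip() during collection.
if not deepspeed.ops.__installed_ops__['cpu-adam']:
    pytest.skip("cpu-adam is not installed", allow_module_level=True)

# Per-test: build a skipif marker once, then apply it as a decorator.
lamb_available = pytest.mark.skipif(not deepspeed.ops.__installed_ops__['lamb'],
                                    reason="lamb is not installed")

@lamb_available
def test_needs_lamb():
    pass  # the real tests build a DeepSpeed config and run training steps
```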
@@ -6,8 +6,12 @@ import numpy as np
import pytest
import copy
import deepspeed
from deepspeed.ops.adam import DeepSpeedCPUAdam

if not deepspeed.ops.__installed_ops__['cpu-adam']:
    pytest.skip("cpu-adam is not installed", allow_module_level=True)

def check_equal(first, second, atol=1e-2, verbose=False):
    x = first.detach().numpy()
......
import torch
import torch.distributed as dist
import deepspeed
@@ -151,6 +152,8 @@ def checkpoint_correctness_verification(args,
    compare_lr_scheduler_states(trained_model, loaded_model)

@pytest.mark.skipif(not deepspeed.ops.__installed_ops__['lamb'],
                    reason="lamb is not installed")
def test_checkpoint_unfused_optimizer(tmpdir):
    config_dict = {
        "train_batch_size": 2,
@@ -264,6 +267,9 @@ def test_checkpoint_fused_optimizer(tmpdir):
                             'deepspeed_adam'),
                         ])
def test_checkpoint_zero_optimizer(tmpdir, zero_stage, use_cpu_offload, adam_optimizer):
    if use_cpu_offload and not deepspeed.ops.__installed_ops__['cpu-adam']:
        pytest.skip("cpu-adam is not installed")
    config_dict = {
        "train_batch_size": 2,
        "steps_per_print": 1,
@@ -320,6 +326,9 @@ def test_checkpoint_zero_no_optimizer(tmpdir,
                                      zero_stage,
                                      use_cpu_offload,
                                      adam_optimizer):
    if use_cpu_offload and not deepspeed.ops.__installed_ops__['cpu-adam']:
        pytest.skip("cpu-adam is not installed")
    config_dict = {
        "train_batch_size": 2,
        "steps_per_print": 1,
@@ -379,6 +388,9 @@ def test_checkpoint_zero_no_optimizer(tmpdir,
                             'deepspeed_adam'),
                         ])
def test_checkpoint_lr_scheduler(tmpdir, zero_stage, use_cpu_offload, adam_optimizer):
    if use_cpu_offload and not deepspeed.ops.__installed_ops__['cpu-adam']:
        pytest.skip("cpu-adam is not installed")
    config_dict = {
        "train_batch_size": 2,
        "steps_per_print": 1,
@@ -450,6 +462,9 @@ def test_checkpoint_lr_scheduler(tmpdir, zero_stage, use_cpu_offload, adam_optim
                             'deepspeed_adam'),
                         ])
def test_checkpoint_no_lr_scheduler(tmpdir, zero_stage, use_cpu_offload, adam_optimizer):
    if use_cpu_offload and not deepspeed.ops.__installed_ops__['cpu-adam']:
        pytest.skip("cpu-adam is not installed")
    config_dict = {
        "train_batch_size": 2,
        "steps_per_print": 1,
......
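
The checkpoint tests above are parametrized over `use_cpu_offload`, so only some parameter combinations need the cpu-adam op; the skip therefore lives inside the test body rather than on the whole module, and the non-offload variants still run. A trimmed sketch of the pattern (the test name and parameter list here are hypothetical condensations of the parametrizations in the diff):

```python
import pytest
import deepspeed

@pytest.mark.parametrize('zero_stage, use_cpu_offload',
                         [(1, False), (2, False), (2, True)])
def test_checkpoint_zero(tmpdir, zero_stage, use_cpu_offload):
    # Skip only the offload variants when the optional op is unavailable.
    if use_cpu_offload and not deepspeed.ops.__installed_ops__['cpu-adam']:
        pytest.skip("cpu-adam is not installed")
    # ... build config_dict and run the save/load round-trip ...
```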
@@ -12,9 +12,13 @@ from modelingpreln import BertEncoder as BertEncoderPreln
from modeling import BertEncoder as BertEncoderPostln
from modeling import BertConfig, BertLayerNorm
from deepspeed import DeepSpeedTransformerLayer, DeepSpeedTransformerConfig
import deepspeed
import sys

if not deepspeed.ops.__installed_ops__['transformer']:
    pytest.skip("transformer kernels are not installed", allow_module_level=True)

def check_equal(first, second, atol=1e-2, verbose=False):
    diction_x = {}
......
@@ -12,9 +12,13 @@ from modelingpreln import BertEncoder as BertEncoderPreln
from modeling import BertEncoder as BertEncoderPostln
from modeling import BertLayerNorm, BertConfig
from deepspeed import DeepSpeedTransformerLayer, DeepSpeedTransformerConfig
import deepspeed
import sys

if not deepspeed.ops.__installed_ops__['transformer']:
    pytest.skip("transformer kernels are not installed", allow_module_level=True)

def check_equal(first, second, atol=1e-2, verbose=False):
    if verbose:
......
@@ -8,6 +8,9 @@ import numpy as np
from common import distributed_test
from simple_model import SimpleModel, args_from_dict

lamb_available = pytest.mark.skipif(not deepspeed.ops.__installed_ops__['lamb'],
                                    reason="lamb is not installed")

def run_model_step(model, gradient_list):
    for value in gradient_list:
@@ -165,6 +168,7 @@ def test_fused_some_overflow(tmpdir):
    _test_fused_some_overflow(args)

@lamb_available
def test_unfused_no_overflow(tmpdir):
    config_dict = {
        "train_batch_size": 1,
@@ -208,6 +212,7 @@ def test_unfused_no_overflow(tmpdir):
    _test_unfused_no_overflow(args)

@lamb_available
def test_unfused_all_overflow(tmpdir):
    config_dict = {
        "train_batch_size": 1,
@@ -253,6 +258,7 @@ def test_unfused_all_overflow(tmpdir):
    _test_unfused_all_overflow(args)

@lamb_available
def test_unfused_some_overflow(tmpdir):
    config_dict = {
        "train_batch_size": 1,
......
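
Both this file and the fp16 tests below define an identical `lamb_available` marker. If the duplication ever becomes a maintenance concern, one option (not part of this change) is to define the marker once in a shared test helper and import it; `ops_util.py` below is a hypothetical module name:

```python
# ops_util.py -- hypothetical shared helper, not part of this diff
import pytest
import deepspeed

def requires_op(op_name):
    """Return a skipif marker for tests that need a given DeepSpeed op."""
    return pytest.mark.skipif(not deepspeed.ops.__installed_ops__[op_name],
                              reason=f"{op_name} is not installed")

lamb_available = requires_op('lamb')
cpu_adam_available = requires_op('cpu-adam')
```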
@@ -8,7 +8,11 @@ import os
from common import distributed_test
from simple_model import SimpleModel, SimpleOptimizer, random_dataloader, args_from_dict

lamb_available = pytest.mark.skipif(not deepspeed.ops.__installed_ops__['lamb'],
                                    reason="lamb is not installed")

@lamb_available
def test_lamb_fp32_grad_clip(tmpdir):
    config_dict = {
        "train_batch_size": 2,
@@ -44,6 +48,7 @@ def test_lamb_fp32_grad_clip(tmpdir):
    _test_lamb_fp32_grad_clip(args=args, model=model, hidden_dim=hidden_dim)

@lamb_available
def test_lamb_fp16_basic(tmpdir):
    config_dict = {
        "train_batch_size": 2,
@@ -81,6 +86,7 @@ def test_lamb_fp16_basic(tmpdir):
    _test_lamb_fp16_basic(args=args, model=model, hidden_dim=hidden_dim)

@lamb_available
def test_lamb_fp16_empty_grad(tmpdir):
    config_dict = {
        "train_batch_size": 2,
@@ -228,6 +234,8 @@ def test_adamw_fp16_empty_grad(tmpdir):
                             True),
                         ])
def test_adam_fp16_zero_onecycle_compatibility(tmpdir, zero_stage, use_cpu_offload):
    if use_cpu_offload and not deepspeed.ops.__installed_ops__['cpu-adam']:
        pytest.skip("cpu-adam is not installed")
    config_dict = {
        "train_batch_size": 1,
        "steps_per_print": 1,
@@ -294,6 +302,8 @@ def test_adam_fp16_zero_onecycle_compatibility(tmpdir, zero_stage, use_cpu_offlo
                             True),
                         ])
def test_zero_static_scale(tmpdir, zero_stage, use_cpu_offload):
    if use_cpu_offload and not deepspeed.ops.__installed_ops__['cpu-adam']:
        pytest.skip("cpu-adam is not installed")
    config_dict = {
        "train_batch_size": 4,
        "steps_per_print": 1,
@@ -392,6 +402,8 @@ def test_zero_static_scale_deprecated_format(tmpdir):
                             True),
                         ])
def test_zero_allow_untested_optimizer(tmpdir, zero_stage, use_cpu_offload):
    if use_cpu_offload and not deepspeed.ops.__installed_ops__['cpu-adam']:
        pytest.skip("cpu-adam is not installed")
    config_dict = {
        "train_batch_size": 4,
        "steps_per_print": 1,
@@ -430,6 +442,8 @@ def test_zero_allow_untested_optimizer(tmpdir, zero_stage, use_cpu_offload):
                             True),
                         ])
def test_zero_empty_partition(tmpdir, zero_stage, use_cpu_offload):
    if use_cpu_offload and not deepspeed.ops.__installed_ops__['cpu-adam']:
        pytest.skip("cpu-adam is not installed")
    config_dict = {
        "train_micro_batch_size_per_gpu": 1,
        "gradient_accumulation_steps": 1,
@@ -500,6 +514,7 @@ def test_adam_amp_basic(tmpdir):
    _test_adam_amp_basic(args=args, model=model, hidden_dim=hidden_dim)

@lamb_available
def test_lamb_amp_basic(tmpdir):
    config_dict = {
        "train_batch_size": 2,
......
@@ -5,6 +5,10 @@
import pytest
import torch
import deepspeed

if not deepspeed.ops.__installed_ops__['sparse-attn']:
    pytest.skip("sparse-attn is not installed", allow_module_level=True)

def test_sparse_attention_module_availability():
......
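
When one of these skips fires unexpectedly, it can help to inspect the same availability table the tests consult. Assuming `__installed_ops__` is a plain dict mapping op name to a boolean, as the checks in this diff imply:

```python
import deepspeed

# List which optional ops the local DeepSpeed build reports as installed.
for op_name, installed in sorted(deepspeed.ops.__installed_ops__.items()):
    print(f"{op_name}: {'installed' if installed else 'missing'}")
```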