"docs/_static/git@developer.sourcefind.cn:change/sglang.git" did not exist on "909abb58f551b9b517587a97b66761fd73568718"
Unverified Commit 02b176c4 authored by LSinev, committed by GitHub

Fix torch version comparisons (#18460)

Comparisons like
version.parse(torch.__version__) > version.parse("1.6")
are True for torch==1.6.0+cu101 or torch==1.6.0+cpu, because a PEP 440 local
version segment ("+cu101", "+cpu") sorts above the bare release.

Comparisons on version.parse(version.parse(torch.__version__).base_version)
are preferred instead (and helper flags for them are available in pytorch_utils.py).
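For illustration, a minimal standalone sketch of the pitfall (not part of the commit; it assumes only the packaging library):

    from packaging import version

    # A PEP 440 local version segment sorts above the same release without
    # one, so this guard is wrongly True on a plain torch 1.6.0 build.
    print(version.parse("1.6.0+cu101") > version.parse("1.6"))  # True

    # Stripping the local segment via base_version restores the intended result.
    base = version.parse(version.parse("1.6.0+cu101").base_version)
    print(base > version.parse("1.6"))  # False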
parent be41eaf5
@@ -20,7 +20,6 @@ from typing import Optional, Tuple, Union
 import torch
 import torch.utils.checkpoint
-from packaging import version
 from torch import nn
 from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
@@ -34,7 +33,12 @@ from ...modeling_outputs import (
     TokenClassifierOutput,
 )
 from ...modeling_utils import PreTrainedModel
-from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
+from ...pytorch_utils import (
+    apply_chunking_to_forward,
+    find_pruneable_heads_and_indices,
+    is_torch_greater_than_1_6,
+    prune_linear_layer,
+)
 from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
 from .configuration_nystromformer import NystromformerConfig
@@ -68,7 +72,7 @@ class NystromformerEmbeddings(nn.Module):
         # position_ids (1, len position emb) is contiguous in memory and exported when serialized
         self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)) + 2)
         self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
-        if version.parse(torch.__version__) > version.parse("1.6.0"):
+        if is_torch_greater_than_1_6:
             self.register_buffer(
                 "token_type_ids",
                 torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device),
...
@@ -23,7 +23,6 @@ from typing import Dict, List, Optional, Tuple, Union
 import torch
 import torch.utils.checkpoint
-from packaging import version
 from torch import nn
 from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
@@ -40,7 +39,7 @@ from ...modeling_outputs import (
     TokenClassifierOutput,
 )
 from ...modeling_utils import PreTrainedModel
-from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
+from ...pytorch_utils import find_pruneable_heads_and_indices, is_torch_greater_than_1_6, prune_linear_layer
 from ...utils import (
     add_code_sample_docstrings,
     add_start_docstrings,
@@ -167,7 +166,7 @@ class QDQBertEmbeddings(nn.Module):
         # position_ids (1, len position emb) is contiguous in memory and exported when serialized
         self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
         self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
-        if version.parse(torch.__version__) > version.parse("1.6.0"):
+        if is_torch_greater_than_1_6:
             self.register_buffer(
                 "token_type_ids",
                 torch.zeros(self.position_ids.size(), dtype=torch.long),
...
@@ -20,7 +20,6 @@ from dataclasses import dataclass
 from typing import Optional, Tuple, Union
 import torch
-from packaging import version
 from torch import nn
 from torch.nn import CrossEntropyLoss
@@ -32,7 +31,12 @@ from ...modeling_outputs import (
     ModelOutput,
 )
 from ...modeling_utils import PreTrainedModel
-from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
+from ...pytorch_utils import (
+    apply_chunking_to_forward,
+    find_pruneable_heads_and_indices,
+    is_torch_greater_than_1_6,
+    prune_linear_layer,
+)
 from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
 from .configuration_realm import RealmConfig
@@ -181,7 +185,7 @@ class RealmEmbeddings(nn.Module):
         # position_ids (1, len position emb) is contiguous in memory and exported when serialized
         self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
         self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
-        if version.parse(torch.__version__) > version.parse("1.6.0"):
+        if is_torch_greater_than_1_6:
             self.register_buffer(
                 "token_type_ids",
                 torch.zeros(self.position_ids.size(), dtype=torch.long),
...
@@ -20,7 +20,6 @@ from typing import List, Optional, Tuple, Union
 import torch
 import torch.utils.checkpoint
-from packaging import version
 from torch import nn
 from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
@@ -36,7 +35,12 @@ from ...modeling_outputs import (
     TokenClassifierOutput,
 )
 from ...modeling_utils import PreTrainedModel
-from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
+from ...pytorch_utils import (
+    apply_chunking_to_forward,
+    find_pruneable_heads_and_indices,
+    is_torch_greater_than_1_6,
+    prune_linear_layer,
+)
 from ...utils import (
     add_code_sample_docstrings,
     add_start_docstrings,
@@ -83,7 +87,7 @@ class RobertaEmbeddings(nn.Module):
         # position_ids (1, len position emb) is contiguous in memory and exported when serialized
         self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
         self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
-        if version.parse(torch.__version__) > version.parse("1.6.0"):
+        if is_torch_greater_than_1_6:
             self.register_buffer(
                 "token_type_ids",
                 torch.zeros(self.position_ids.size(), dtype=torch.long),
...
@@ -21,7 +21,6 @@ from typing import List, Optional, Tuple
 import torch
 import torch.utils.checkpoint
-from packaging import version
 from torch import nn
 from torch.nn import CrossEntropyLoss
@@ -35,14 +34,19 @@ from ...modeling_outputs import (
     TokenClassifierOutput,
 )
 from ...modeling_utils import PreTrainedModel
-from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
+from ...pytorch_utils import (
+    find_pruneable_heads_and_indices,
+    is_torch_greater_or_equal_than_1_10,
+    is_torch_greater_than_1_6,
+    prune_linear_layer,
+)
 from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
 from .configuration_vilt import ViltConfig
 logger = logging.get_logger(__name__)
-if version.parse(torch.__version__) < version.parse("1.10.0"):
+if not is_torch_greater_or_equal_than_1_10:
     logger.warning(
         f"You are using torch=={torch.__version__}, but torch>=1.10.0 is required to use "
         "ViltModel. Please upgrade torch."
@@ -251,7 +255,7 @@ class TextEmbeddings(nn.Module):
         # position_ids (1, len position emb) is contiguous in memory and exported when serialized
         self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
         self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
-        if version.parse(torch.__version__) > version.parse("1.6.0"):
+        if is_torch_greater_than_1_6:
             self.register_buffer(
                 "token_type_ids",
                 torch.zeros(self.position_ids.size(), dtype=torch.long),
...
@@ -19,7 +19,6 @@ from typing import List, Optional, Tuple, Union
 import torch
 import torch.utils.checkpoint
-from packaging import version
 from torch import nn
 from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
@@ -35,7 +34,12 @@ from ...modeling_outputs import (
     TokenClassifierOutput,
 )
 from ...modeling_utils import PreTrainedModel
-from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
+from ...pytorch_utils import (
+    apply_chunking_to_forward,
+    find_pruneable_heads_and_indices,
+    is_torch_greater_than_1_6,
+    prune_linear_layer,
+)
 from ...utils import (
     add_code_sample_docstrings,
     add_start_docstrings,
@@ -76,7 +80,7 @@ class XLMRobertaXLEmbeddings(nn.Module):
         # position_ids (1, len position emb) is contiguous in memory and exported when serialized
         self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
         self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
-        if version.parse(torch.__version__) > version.parse("1.6.0"):
+        if is_torch_greater_than_1_6:
             self.register_buffer(
                 "token_type_ids",
                 torch.zeros(self.position_ids.size(), dtype=torch.long),
...
@@ -21,7 +21,6 @@ from typing import Optional, Tuple, Union
 import torch
 import torch.utils.checkpoint
-from packaging import version
 from torch import nn
 from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
@@ -35,7 +34,12 @@ from ...modeling_outputs import (
     TokenClassifierOutput,
 )
 from ...modeling_utils import PreTrainedModel
-from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
+from ...pytorch_utils import (
+    apply_chunking_to_forward,
+    find_pruneable_heads_and_indices,
+    is_torch_greater_than_1_6,
+    prune_linear_layer,
+)
 from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
 from .configuration_yoso import YosoConfig
@@ -257,7 +261,7 @@ class YosoEmbeddings(nn.Module):
         # position_ids (1, len position emb) is contiguous in memory and exported when serialized
         self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)) + 2)
         self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
-        if version.parse(torch.__version__) > version.parse("1.6.0"):
+        if is_torch_greater_than_1_6:
             self.register_buffer(
                 "token_type_ids",
                 torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device),
...
@@ -34,6 +34,7 @@ from .config import OnnxConfig
 if is_torch_available():
     from ..modeling_utils import PreTrainedModel
+    from ..pytorch_utils import is_torch_less_than_1_11
 if is_tf_available():
     from ..modeling_tf_utils import TFPreTrainedModel
@@ -155,7 +156,7 @@ def export_pytorch(
     # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
     # so we check the torch version for backwards compatibility
-    if parse(torch.__version__) < parse("1.10"):
+    if is_torch_less_than_1_11:
         # export can work with named args but the dict containing named args
         # has to be the last element of the args tuple.
         try:
...
@@ -967,7 +967,9 @@ class Pipeline(_ScikitCompat):
     def get_inference_context(self):
         inference_context = (
-            torch.inference_mode if version.parse(torch.__version__) >= version.parse("1.9.0") else torch.no_grad
+            torch.inference_mode
+            if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.9.0")
+            else torch.no_grad
         )
         return inference_context
...
@@ -25,8 +25,12 @@ ALL_LAYERNORM_LAYERS = [nn.LayerNorm]
 logger = logging.get_logger(__name__)
-is_torch_less_than_1_8 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.8.0")
-is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
+parsed_torch_version_base = version.parse(version.parse(torch.__version__).base_version)
+is_torch_greater_or_equal_than_1_6 = parsed_torch_version_base >= version.parse("1.6.0")
+is_torch_greater_than_1_6 = parsed_torch_version_base > version.parse("1.6.0")
+is_torch_less_than_1_8 = parsed_torch_version_base < version.parse("1.8.0")
+is_torch_greater_or_equal_than_1_10 = parsed_torch_version_base >= version.parse("1.10")
+is_torch_less_than_1_11 = parsed_torch_version_base < version.parse("1.11")
 def torch_int_div(tensor1, tensor2):
...
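For illustration, a hedged sketch of how a model file consumes the centralized flag after this change. This is a standalone re-creation, not the commit's code: the flag is re-derived locally instead of imported from transformers.pytorch_utils, and the persistent=False detail is assumed from the surrounding transformers code rather than shown in the hunks above.

    import torch
    from packaging import version
    from torch import nn

    # Standalone re-creation of the flag defined in pytorch_utils.py.
    parsed_torch_version_base = version.parse(version.parse(torch.__version__).base_version)
    is_torch_greater_than_1_6 = parsed_torch_version_base > version.parse("1.6.0")

    class Embeddings(nn.Module):
        def __init__(self, max_position_embeddings: int = 512):
            super().__init__()
            self.register_buffer("position_ids", torch.arange(max_position_embeddings).expand((1, -1)))
            if is_torch_greater_than_1_6:
                # persistent=False (added in torch 1.6) keeps the buffer out of
                # the state dict; the flag is now correct on +cu/+cpu builds too.
                self.register_buffer(
                    "token_type_ids",
                    torch.zeros(self.position_ids.size(), dtype=torch.long),
                    persistent=False,
                )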
@@ -71,7 +71,12 @@ from .modelcard import TrainingSummary
 from .modeling_utils import PreTrainedModel, load_sharded_checkpoint, unwrap_model
 from .models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_MAPPING_NAMES
 from .optimization import Adafactor, get_scheduler
-from .pytorch_utils import ALL_LAYERNORM_LAYERS
+from .pytorch_utils import (
+    ALL_LAYERNORM_LAYERS,
+    is_torch_greater_or_equal_than_1_6,
+    is_torch_greater_or_equal_than_1_10,
+    is_torch_less_than_1_11,
+)
 from .tokenization_utils_base import PreTrainedTokenizerBase
 from .trainer_callback import (
     CallbackHandler,
@@ -165,11 +170,11 @@ if is_in_notebook():
 if is_apex_available():
     from apex import amp
-if version.parse(torch.__version__) >= version.parse("1.6"):
+if is_torch_greater_or_equal_than_1_6:
     _is_torch_generator_available = True
     _is_native_cuda_amp_available = True
-if version.parse(torch.__version__) >= version.parse("1.10"):
+if is_torch_greater_or_equal_than_1_10:
     _is_native_cpu_amp_available = True
 if is_datasets_available():
@@ -405,7 +410,7 @@ class Trainer:
             # Would have to update setup.py with torch>=1.12.0
             # which isn't ideally given that it will force people not using FSDP to also use torch>=1.12.0
             # below is the current alternative.
-            if version.parse(torch.__version__) < version.parse("1.12.0"):
+            if version.parse(version.parse(torch.__version__).base_version) < version.parse("1.12.0"):
                 raise ValueError("FSDP requires PyTorch >= 1.12.0")
             from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
@@ -1676,7 +1681,7 @@ class Trainer:
                 is_random_sampler = hasattr(train_dataloader, "sampler") and isinstance(
                     train_dataloader.sampler, RandomSampler
                 )
-                if version.parse(torch.__version__) < version.parse("1.11") or not is_random_sampler:
+                if is_torch_less_than_1_11 or not is_random_sampler:
                     # We just need to begin an iteration to create the randomization of the sampler.
                     # That was before PyTorch 1.11 however...
                    for _ in train_dataloader:
@@ -2430,7 +2435,7 @@ class Trainer:
         arguments, depending on the situation.
         """
         if self.use_cuda_amp or self.use_cpu_amp:
-            if version.parse(torch.__version__) >= version.parse("1.10"):
+            if is_torch_greater_or_equal_than_1_10:
                 ctx_manager = (
                     torch.cpu.amp.autocast(dtype=self.amp_dtype)
                     if self.use_cpu_amp
...
@@ -835,7 +835,7 @@ def _get_learning_rate(self):
     last_lr = (
         # backward compatibility for pytorch schedulers
         self.lr_scheduler.get_last_lr()[0]
-        if version.parse(torch.__version__) >= version.parse("1.4")
+        if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.4")
         else self.lr_scheduler.get_lr()[0]
     )
     return last_lr
...
@@ -300,7 +300,7 @@ def is_torch_bf16_gpu_available():
     # 4. torch.autocast exists
     # XXX: one problem here is that it may give invalid results on mixed gpus setup, so it's
     # really only correct for the 0th gpu (or currently set default device if different from 0)
-    if version.parse(torch.__version__) < version.parse("1.10"):
+    if version.parse(version.parse(torch.__version__).base_version) < version.parse("1.10"):
         return False
     if torch.cuda.is_available() and torch.version.cuda is not None:
@@ -322,7 +322,7 @@ def is_torch_bf16_cpu_available():
     import torch
-    if version.parse(torch.__version__) < version.parse("1.10"):
+    if version.parse(version.parse(torch.__version__).base_version) < version.parse("1.10"):
         return False
     try:
@@ -357,7 +357,7 @@ def is_torch_tf32_available():
         return False
     if int(torch.version.cuda.split(".")[0]) < 11:
         return False
-    if version.parse(torch.__version__) < version.parse("1.7"):
+    if version.parse(version.parse(torch.__version__).base_version) < version.parse("1.7"):
         return False
     return True
...
@@ -22,7 +22,6 @@ import os
 import torch
 import torch.utils.checkpoint
-from packaging import version
 from torch import nn
 from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
 from typing import Optional, Tuple, Union
@@ -48,6 +47,7 @@ from ...pytorch_utils import (
     apply_chunking_to_forward,
     find_pruneable_heads_and_indices,
     prune_linear_layer,
+    is_torch_greater_than_1_6,
 )
 from ...utils import logging
 from .configuration_{{cookiecutter.lowercase_modelname}} import {{cookiecutter.camelcase_modelname}}Config
@@ -157,7 +157,7 @@ class {{cookiecutter.camelcase_modelname}}Embeddings(nn.Module):
         # position_ids (1, len position emb) is contiguous in memory and exported when serialized
         self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
         self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
-        if version.parse(torch.__version__) > version.parse("1.6.0"):
+        if is_torch_greater_than_1_6:
             self.register_buffer(
                 "token_type_ids",
                 torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device),