Unverified Commit a6d39f6a authored by Yuliang Liu, committed by GitHub

Merge pull request #39 from Yuliang-Liu/dev

Data generation
parents c7341cda 2189c3c4
import torch
from fvcore.nn import giou_loss, smooth_l1_loss
from torch import nn
from torch.nn import functional as F
import fvcore.nn.weight_init as weight_init
from detectron2.config import configurable
from detectron2.layers import ShapeSpec, batched_nms, cat, cross_entropy, nonzero_tuple
from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers
from detectron2.modeling.roi_heads.fast_rcnn import _log_classification_stats
__all__ = ["GRiTFastRCNNOutputLayers"]
class GRiTFastRCNNOutputLayers(FastRCNNOutputLayers):
@configurable
def __init__(
self,
input_shape: ShapeSpec,
**kwargs,
):
super().__init__(
input_shape=input_shape,
**kwargs,
)
input_size = input_shape.channels * \
(input_shape.width or 1) * (input_shape.height or 1)
self.bbox_pred = nn.Sequential(
nn.Linear(input_size, input_size),
nn.ReLU(inplace=True),
nn.Linear(input_size, 4)
)
weight_init.c2_xavier_fill(self.bbox_pred[0])
nn.init.normal_(self.bbox_pred[-1].weight, std=0.001)
nn.init.constant_(self.bbox_pred[-1].bias, 0)
@classmethod
def from_config(cls, cfg, input_shape):
ret = super().from_config(cfg, input_shape)
return ret
def losses(self, predictions, proposals):
scores, proposal_deltas = predictions
gt_classes = (
cat([p.gt_classes for p in proposals], dim=0) if len(proposals) else torch.empty(0)
)
num_classes = self.num_classes
_log_classification_stats(scores, gt_classes)
if len(proposals):
proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0) # Nx4
assert not proposal_boxes.requires_grad, "Proposals should not require gradients!"
gt_boxes = cat(
[(p.gt_boxes if p.has("gt_boxes") else p.proposal_boxes).tensor for p in proposals],
dim=0,
)
else:
proposal_boxes = gt_boxes = torch.empty((0, 4), device=proposal_deltas.device)
loss_cls = self.softmax_cross_entropy_loss(scores, gt_classes)
return {
"loss_cls": loss_cls,
"loss_box_reg": self.box_reg_loss(
proposal_boxes, gt_boxes, proposal_deltas, gt_classes,
num_classes=num_classes)
}
def softmax_cross_entropy_loss(self, pred_class_logits, gt_classes):
if pred_class_logits.numel() == 0:
return pred_class_logits.new_zeros([1])[0]
loss = F.cross_entropy(
pred_class_logits, gt_classes, reduction="mean")
return loss
def box_reg_loss(
self, proposal_boxes, gt_boxes, pred_deltas, gt_classes,
num_classes=-1):
num_classes = num_classes if num_classes > 0 else self.num_classes
box_dim = proposal_boxes.shape[1]
fg_inds = nonzero_tuple((gt_classes >= 0) & (gt_classes < num_classes))[0]
if pred_deltas.shape[1] == box_dim:
fg_pred_deltas = pred_deltas[fg_inds]
else:
fg_pred_deltas = pred_deltas.view(-1, self.num_classes, box_dim)[
fg_inds, gt_classes[fg_inds]
]
if self.box_reg_loss_type == "smooth_l1":
gt_pred_deltas = self.box2box_transform.get_deltas(
proposal_boxes[fg_inds],
gt_boxes[fg_inds],
)
loss_box_reg = smooth_l1_loss(
fg_pred_deltas, gt_pred_deltas, self.smooth_l1_beta, reduction="sum"
)
elif self.box_reg_loss_type == "giou":
fg_pred_boxes = self.box2box_transform.apply_deltas(
fg_pred_deltas, proposal_boxes[fg_inds]
)
loss_box_reg = giou_loss(fg_pred_boxes, gt_boxes[fg_inds], reduction="sum")
else:
raise ValueError(f"Invalid bbox reg loss type '{self.box_reg_loss_type}'")
        return loss_box_reg / max(gt_classes.numel(), 1.0)  # normalize by total #proposals, not only foreground
def predict_probs(self, predictions, proposals):
scores = predictions[0]
num_inst_per_image = [len(p) for p in proposals]
probs = F.softmax(scores, dim=-1)
return probs.split(num_inst_per_image, dim=0)
    def forward(self, x):
        if x.dim() > 2:
            x = torch.flatten(x, start_dim=1)
        scores = self.cls_score(x)
        proposal_deltas = self.bbox_pred(x)
        return scores, proposal_deltas
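# Illustrative sketch (not part of GRiT): the box head above is class-agnostic.
# bbox_pred emits 4 deltas per proposal regardless of class, so box_reg_loss takes
# its pred_deltas.shape[1] == box_dim branch. The feature size here is a made-up
# example; GRiT derives it from the ROI head's ShapeSpec.
if __name__ == "__main__":
    _bbox_pred = nn.Sequential(
        nn.Linear(1024, 1024),
        nn.ReLU(inplace=True),
        nn.Linear(1024, 4),  # 4 deltas per proposal, independent of class count
    )
    _deltas = _bbox_pred(torch.randn(8, 1024))  # 8 proposals
    assert _deltas.shape == (8, 4)  # box_dim == 4 -> class-agnostic branch in box_reg_loss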
This diff is collapsed.
import torch
from detectron2.structures import Boxes, RotatedBoxes, pairwise_iou, pairwise_iou_rotated
def soft_nms(boxes, scores, method, gaussian_sigma, linear_threshold, prune_threshold):
"""
Performs soft non-maximum suppression algorithm on axis aligned boxes
Args:
        boxes (Tensor[N, 4]):
            boxes where NMS will be performed. They
            are expected to be in (x1, y1, x2, y2) format
scores (Tensor[N]):
scores for each one of the boxes
method (str):
one of ['gaussian', 'linear', 'hard']
            see paper for details. Users are encouraged not to use "hard", as this is
            the same NMS available elsewhere in detectron2
gaussian_sigma (float):
parameter for Gaussian penalty function
linear_threshold (float):
iou threshold for applying linear decay. Nt from the paper
re-used as threshold for standard "hard" nms
prune_threshold (float):
boxes with scores below this threshold are pruned at each iteration.
Dramatically reduces computation time. Authors use values in [10e-4, 10e-2]
Returns:
tuple(Tensor, Tensor):
[0]: int64 tensor with the indices of the elements that have been kept
by Soft NMS, sorted in decreasing order of scores
[1]: float tensor with the re-scored scores of the elements that were kept
"""
return _soft_nms(
Boxes,
pairwise_iou,
boxes,
scores,
method,
gaussian_sigma,
linear_threshold,
prune_threshold,
)
def batched_soft_nms(
boxes, scores, idxs, method, gaussian_sigma, linear_threshold, prune_threshold
):
"""
Performs soft non-maximum suppression in a batched fashion.
    Each index value corresponds to a category, and NMS
will not be applied between elements of different categories.
Args:
boxes (Tensor[N, 4]):
boxes where NMS will be performed. They
are expected to be in (x1, y1, x2, y2) format
scores (Tensor[N]):
scores for each one of the boxes
idxs (Tensor[N]):
indices of the categories for each one of the boxes.
method (str):
one of ['gaussian', 'linear', 'hard']
            see paper for details. Users are encouraged not to use "hard", as this is
            the same NMS available elsewhere in detectron2
gaussian_sigma (float):
parameter for Gaussian penalty function
linear_threshold (float):
iou threshold for applying linear decay. Nt from the paper
re-used as threshold for standard "hard" nms
prune_threshold (float):
boxes with scores below this threshold are pruned at each iteration.
Dramatically reduces computation time. Authors use values in [10e-4, 10e-2]
Returns:
tuple(Tensor, Tensor):
[0]: int64 tensor with the indices of the elements that have been kept
by Soft NMS, sorted in decreasing order of scores
[1]: float tensor with the re-scored scores of the elements that were kept
"""
if boxes.numel() == 0:
return (
torch.empty((0,), dtype=torch.int64, device=boxes.device),
torch.empty((0,), dtype=torch.float32, device=scores.device),
)
    # Strategy: in order to perform NMS independently per class,
    # we add an offset to all the boxes. The offset is dependent
    # only on the class idx, and is large enough so that boxes
    # from different classes do not overlap.
max_coordinate = boxes.max()
offsets = idxs.to(boxes) * (max_coordinate + 1)
boxes_for_nms = boxes + offsets[:, None]
return soft_nms(
boxes_for_nms, scores, method, gaussian_sigma, linear_threshold, prune_threshold
)
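# Illustrative sketch (not from the original file): why the per-class offset
# trick works. Shifting each class by class_idx * (max_coordinate + 1) moves the
# classes into disjoint coordinate ranges, so cross-class IoU is exactly 0 and
# suppression can only happen within a class.
def _offset_trick_demo():
    boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0],
                          [1.0, 1.0, 11.0, 11.0]])  # heavily overlapping boxes...
    idxs = torch.tensor([0, 1])                     # ...but different classes
    offsets = idxs.to(boxes) * (boxes.max() + 1)    # [0., 12.]
    boxes_for_nms = boxes + offsets[:, None]
    # The class-1 box now spans [13, 23] while class-0 stays in [0, 10]: IoU == 0.
    return boxes_for_nms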
def _soft_nms(
box_class,
pairwise_iou_func,
boxes,
scores,
method,
gaussian_sigma,
linear_threshold,
prune_threshold,
):
"""
Soft non-max suppression algorithm.
    Implementation of [Soft-NMS -- Improving Object Detection With One Line of Code]
    (https://arxiv.org/abs/1704.04503)
Args:
        box_class (cls): one of Boxes, RotatedBoxes
pairwise_iou_func (func): one of pairwise_iou, pairwise_iou_rotated
boxes (Tensor[N, ?]):
boxes where NMS will be performed
if Boxes, in (x1, y1, x2, y2) format
if RotatedBoxes, in (x_ctr, y_ctr, width, height, angle_degrees) format
scores (Tensor[N]):
scores for each one of the boxes
method (str):
one of ['gaussian', 'linear', 'hard']
            see paper for details. Users are encouraged not to use "hard", as this is
            the same NMS available elsewhere in detectron2
gaussian_sigma (float):
parameter for Gaussian penalty function
linear_threshold (float):
iou threshold for applying linear decay. Nt from the paper
re-used as threshold for standard "hard" nms
prune_threshold (float):
boxes with scores below this threshold are pruned at each iteration.
Dramatically reduces computation time. Authors use values in [10e-4, 10e-2]
Returns:
tuple(Tensor, Tensor):
[0]: int64 tensor with the indices of the elements that have been kept
by Soft NMS, sorted in decreasing order of scores
[1]: float tensor with the re-scored scores of the elements that were kept
"""
boxes = boxes.clone()
scores = scores.clone()
idxs = torch.arange(scores.size()[0])
idxs_out = []
scores_out = []
while scores.numel() > 0:
top_idx = torch.argmax(scores)
idxs_out.append(idxs[top_idx].item())
scores_out.append(scores[top_idx].item())
top_box = boxes[top_idx]
ious = pairwise_iou_func(box_class(top_box.unsqueeze(0)), box_class(boxes))[0]
if method == "linear":
decay = torch.ones_like(ious)
decay_mask = ious > linear_threshold
decay[decay_mask] = 1 - ious[decay_mask]
elif method == "gaussian":
decay = torch.exp(-torch.pow(ious, 2) / gaussian_sigma)
elif method == "hard": # standard NMS
decay = (ious < linear_threshold).float()
else:
raise NotImplementedError("{} soft nms method not implemented.".format(method))
scores *= decay
keep = scores > prune_threshold
keep[top_idx] = False
boxes = boxes[keep]
scores = scores[keep]
idxs = idxs[keep]
return torch.tensor(idxs_out).to(boxes.device), torch.tensor(scores_out).to(scores.device)
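# Illustrative sketch (not part of the original file): the three decay rules
# applied to a vector of IoUs, with assumed sigma = 0.5 and Nt = 0.5.
if __name__ == "__main__":
    ious = torch.tensor([0.1, 0.4, 0.7, 0.9])
    linear = torch.ones_like(ious)
    linear[ious > 0.5] = 1 - ious[ious > 0.5]  # -> [1.00, 1.00, 0.30, 0.10]
    gaussian = torch.exp(-ious.pow(2) / 0.5)   # -> [0.98, 0.73, 0.38, 0.20]
    hard = (ious < 0.5).float()                # -> [1., 1., 0., 0.]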
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import json
import logging
import os
import shutil
import tempfile
import fnmatch
from functools import wraps
from hashlib import sha256
from io import open
import boto3
import requests
from botocore.exceptions import ClientError
from tqdm import tqdm
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(
os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')))
default_cache_path = os.path.join(torch_cache_home, 'pytorch_transformers')
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from pathlib import Path
PYTORCH_PRETRAINED_BERT_CACHE = Path(
os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path))
except (AttributeError, ImportError):
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
default_cache_path)
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def url_to_filename(url, etag=None):
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
"""
url_bytes = url.encode('utf-8')
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode('utf-8')
etag_hash = sha256(etag_bytes)
filename += '.' + etag_hash.hexdigest()
return filename
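# Illustrative sketch (not part of the original file): the cache filename is the
# sha256 hex digest of the URL, optionally followed by '.' and the sha256 hex
# digest of the ETag. The URL and ETag below are hypothetical.
def _filename_demo():
    url, etag = "https://example.com/model.bin", '"abc123"'
    name = sha256(url.encode('utf-8')).hexdigest()
    name += '.' + sha256(etag.encode('utf-8')).hexdigest()
    return name  # equals url_to_filename(url, etag)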
def filename_to_url(filename, cache_dir=None):
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise EnvironmentError("file {} not found".format(cache_path))
meta_path = cache_path + '.json'
if not os.path.exists(meta_path):
raise EnvironmentError("file {} not found".format(meta_path))
with open(meta_path, encoding="utf-8") as meta_file:
metadata = json.load(meta_file)
url = metadata['url']
etag = metadata['etag']
return url, etag
def cached_path(url_or_filename, cache_dir=None):
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
if parsed.scheme in ('http', 'https', 's3'):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == '':
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
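# Illustrative usage sketch (not part of the original file; paths and URL are
# hypothetical). An existing local path is returned unchanged, while an
# http(s)/s3 URL is routed through get_from_cache.
def _cached_path_demo():
    with tempfile.NamedTemporaryFile(delete=False) as f:
        local = f.name
    assert cached_path(local) == local  # local file: returned as-is
    # cached_path("https://example.com/model.bin")  # would download and cache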
def split_s3_path(url):
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
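# Illustrative sketch (not part of the original file; the path is hypothetical).
def _split_s3_path_demo():
    bucket, key = split_s3_path("s3://my-bucket/models/weights.bin")
    assert (bucket, key) == ("my-bucket", "models/weights.bin")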
def s3_request(func):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise EnvironmentError("file {} not found".format(url))
else:
raise
return wrapper
@s3_request
def s3_etag(url):
"""Check ETag on S3 object."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@s3_request
def s3_get(url, temp_file):
"""Pull a file directly from S3."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url, temp_file):
req = requests.get(url, stream=True)
content_length = req.headers.get('Content-Length')
total = int(content_length) if content_length is not None else None
progress = tqdm(unit="B", total=total)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(url, cache_dir=None):
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if sys.version_info[0] == 2 and not isinstance(cache_dir, str):
cache_dir = str(cache_dir)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
try:
response = requests.head(url, allow_redirects=True)
if response.status_code != 200:
etag = None
else:
etag = response.headers.get("ETag")
except EnvironmentError:
etag = None
if sys.version_info[0] == 2 and etag is not None:
etag = etag.decode('utf-8')
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
# If we don't have a connection (etag is None) and can't identify the file
# try to get the last downloaded one
if not os.path.exists(cache_path) and etag is None:
matching_files = fnmatch.filter(os.listdir(cache_dir), filename + '.*')
matching_files = list(filter(lambda s: not s.endswith('.json'), matching_files))
if matching_files:
cache_path = os.path.join(cache_dir, matching_files[-1])
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, 'wb') as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {'url': url, 'etag': etag}
meta_path = cache_path + '.json'
with open(meta_path, 'w') as meta_file:
output_string = json.dumps(meta)
meta_file.write(output_string)
logger.info("removing temp file %s", temp_file.name)
return cache_path
import torch
class LoadTextTokens(object):
def __init__(self, tokenizer, max_text_len=40, padding='do_not_pad'):
self.tokenizer = tokenizer
self.max_text_len = max_text_len
self.padding = padding
def descriptions_to_text_tokens(self, target, begin_token):
target_encoding = self.tokenizer(
target, padding=self.padding,
add_special_tokens=False,
truncation=True, max_length=self.max_text_len)
need_predict = [1] * len(target_encoding['input_ids'])
payload = target_encoding['input_ids']
if len(payload) > self.max_text_len - 2:
payload = payload[-(self.max_text_len - 2):]
            need_predict = need_predict[-(self.max_text_len - 2):]
input_ids = [begin_token] + payload + [self.tokenizer.sep_token_id]
need_predict = [0] + need_predict + [1]
data = {
'text_tokens': torch.tensor(input_ids),
'text_lengths': len(input_ids),
'need_predict': torch.tensor(need_predict),
}
return data
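    # Illustrative note (not part of GRiT): the layout produced above is
    # text_tokens = [begin_token] + payload + [sep_token_id], with
    # need_predict = [0] + [1] * len(payload) + [1] -- the model is trained to
    # predict every payload token and the trailing SEP, but not the begin token.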
def __call__(self, object_descriptions, box_features, begin_token):
text_tokens = []
text_lengths = []
need_predict = []
for description in object_descriptions:
tokens = self.descriptions_to_text_tokens(description, begin_token)
text_tokens.append(tokens['text_tokens'])
text_lengths.append(tokens['text_lengths'])
need_predict.append(tokens['need_predict'])
text_tokens = torch.cat(self.collate(text_tokens), dim=0).to(box_features.device)
text_lengths = torch.tensor(text_lengths).to(box_features.device)
need_predict = torch.cat(self.collate(need_predict), dim=0).to(box_features.device)
assert text_tokens.dim() == 2 and need_predict.dim() == 2
data = {'text_tokens': text_tokens,
'text_lengths': text_lengths,
'need_predict': need_predict}
return data
def collate(self, batch):
if all(isinstance(b, torch.Tensor) for b in batch) and len(batch) > 0:
if not all(b.shape == batch[0].shape for b in batch[1:]):
assert all(len(b.shape) == len(batch[0].shape) for b in batch[1:])
shape = torch.tensor([b.shape for b in batch])
max_shape = tuple(shape.max(dim=0)[0].tolist())
batch2 = []
for b in batch:
if any(c < m for c, m in zip(b.shape, max_shape)):
b2 = torch.zeros(max_shape, dtype=b.dtype, device=b.device)
if b.dim() == 1:
b2[:b.shape[0]] = b
elif b.dim() == 2:
b2[:b.shape[0], :b.shape[1]] = b
elif b.dim() == 3:
b2[:b.shape[0], :b.shape[1], :b.shape[2]] = b
else:
raise NotImplementedError
b = b2
batch2.append(b[None, ...])
else:
batch2 = []
for b in batch:
batch2.append(b[None, ...])
return batch2
else:
raise NotImplementedError
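# Illustrative sketch (not part of GRiT): collate right-pads each tensor with
# zeros up to the element-wise max shape, ready for torch.cat.
if __name__ == "__main__":
    loader = LoadTextTokens(tokenizer=None)  # tokenizer is unused by collate
    padded = loader.collate([torch.tensor([1, 2, 3]), torch.tensor([4, 5])])
    assert torch.equal(torch.cat(padded, dim=0),
                       torch.tensor([[1, 2, 3], [4, 5, 0]]))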
This diff is collapsed.
This diff is collapsed.
import torch
from detectron2.engine.defaults import DefaultPredictor
from detectron2.utils.visualizer import ColorMode, Visualizer
class Visualizer_GRiT(Visualizer):
def __init__(self, image, instance_mode=None):
super().__init__(image, instance_mode=instance_mode)
def draw_instance_predictions(self, predictions):
boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None
scores = predictions.scores if predictions.has("scores") else None
classes = predictions.pred_classes.tolist() if predictions.has("pred_classes") else None
object_description = predictions.pred_object_descriptions.data
if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"):
colors = [
self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes
]
alpha = 0.8
else:
colors = None
alpha = 0.5
if self._instance_mode == ColorMode.IMAGE_BW:
self.output.reset_image(
self._create_grayscale_image(
(predictions.pred_masks.any(dim=0) > 0).numpy()
if predictions.has("pred_masks")
else None
)
)
alpha = 0.3
self.overlay_instances(
masks=None,
boxes=boxes,
labels=object_description,
keypoints=None,
assigned_colors=colors,
alpha=alpha,
)
return self.output
class VisualizationDemo(object):
def __init__(self, cfg, instance_mode=ColorMode.IMAGE):
self.cpu_device = torch.device("cpu")
self.instance_mode = instance_mode
self.predictor = DefaultPredictor(cfg)
def run_on_image(self, image):
predictions = self.predictor(image)
# Convert image from OpenCV BGR format to Matplotlib RGB format.
image = image[:, :, ::-1]
visualizer = Visualizer_GRiT(image, instance_mode=self.instance_mode)
instances = predictions["instances"].to(self.cpu_device)
vis_output = visualizer.draw_instance_predictions(predictions=instances)
return predictions, vis_output
class BatchPredictor(DefaultPredictor):
def __init__(self, cfg):
super().__init__(cfg)
def __call__(self, original_images):
        input_list = []
with torch.no_grad(): # https://github.com/sphinx-doc/sphinx/issues/4258
# Apply pre-processing to image.
for original_image in original_images:
if self.input_format == "RGB":
# whether the model expects BGR inputs or RGB
original_image = original_image[:, :, ::-1]
height, width = original_image.shape[:2]
image = self.aug.get_transform(original_image).apply_image(original_image)
image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
input = {"image": image, "height": height, "width": width}
input_list.append(input)
predictions = self.model(input_list)
return predictions
class BatchVisualizationDemo(object):
def __init__(self, cfg):
self.cpu_device = torch.device("cpu")
self.predictor = BatchPredictor(cfg)
def run_on_images(self, images):
predictions = self.predictor(images)
return predictions
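# Illustrative usage sketch (not part of the original file): running the batch
# predictor end to end. The config setup and image paths are hypothetical and
# depend on a GRiT config file and checkpoint being available.
if __name__ == "__main__":
    import cv2
    from detectron2.config import get_cfg
    cfg = get_cfg()
    # ... merge the GRiT config file and set cfg.MODEL.WEIGHTS here (omitted) ...
    demo = BatchVisualizationDemo(cfg)
    images = [cv2.imread(p) for p in ["a.jpg", "b.jpg"]]  # hypothetical paths
    predictions = demo.run_on_images(images)  # one dict per image, each with "instances"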
version: 2.1
# -------------------------------------------------------------------------------------
# Environments to run the jobs in
# -------------------------------------------------------------------------------------
cpu: &cpu
machine:
image: ubuntu-2004:202107-02
resource_class: medium
gpu: &gpu
machine:
    # NOTE: use a CUDA version that's supported by all our pytorch versions
image: ubuntu-1604-cuda-11.1:202012-01
resource_class: gpu.nvidia.small
windows-cpu: &windows_cpu
machine:
resource_class: windows.medium
image: windows-server-2019-vs2019:stable
shell: powershell.exe
# windows-gpu: &windows_gpu
# machine:
# resource_class: windows.gpu.nvidia.medium
# image: windows-server-2019-nvidia:stable
version_parameters: &version_parameters
parameters:
pytorch_version:
type: string
torchvision_version:
type: string
pytorch_index:
type: string
# use test wheels index to have access to RC wheels
# https://download.pytorch.org/whl/test/torch_test.html
default: "https://download.pytorch.org/whl/torch_stable.html"
python_version: # NOTE: only affect linux
type: string
default: '3.6.8'
environment:
PYTORCH_VERSION: << parameters.pytorch_version >>
TORCHVISION_VERSION: << parameters.torchvision_version >>
PYTORCH_INDEX: << parameters.pytorch_index >>
PYTHON_VERSION: << parameters.python_version>>
# point datasets to ~/.torch so it's cached in CI
DETECTRON2_DATASETS: ~/.torch/datasets
# -------------------------------------------------------------------------------------
# Re-usable commands
# -------------------------------------------------------------------------------------
# install_nvidia_driver: &install_nvidia_driver
# - run:
# name: Install nvidia driver
# working_directory: ~/
# command: |
# wget -q 'https://s3.amazonaws.com/ossci-linux/nvidia_driver/NVIDIA-Linux-x86_64-430.40.run'
# sudo /bin/bash ./NVIDIA-Linux-x86_64-430.40.run -s --no-drm
# nvidia-smi
add_ssh_keys: &add_ssh_keys
# https://circleci.com/docs/2.0/add-ssh-key/
- add_ssh_keys:
fingerprints:
- "e4:13:f2:22:d4:49:e8:e4:57:5a:ac:20:2f:3f:1f:ca"
install_python: &install_python
- run:
name: Install Python
working_directory: ~/
command: |
# upgrade pyenv
cd /opt/circleci/.pyenv/plugins/python-build/../.. && git pull && cd -
pyenv install -s $PYTHON_VERSION
pyenv global $PYTHON_VERSION
python --version
which python
pip install --upgrade pip
setup_venv: &setup_venv
- run:
name: Setup Virtual Env
working_directory: ~/
command: |
python -m venv ~/venv
echo ". ~/venv/bin/activate" >> $BASH_ENV
. ~/venv/bin/activate
python --version
which python
which pip
pip install --upgrade pip
setup_venv_win: &setup_venv_win
- run:
    name: Setup Virtual Env for Windows
command: |
pip install virtualenv
python -m virtualenv env
.\env\Scripts\activate
python --version
which python
which pip
install_linux_dep: &install_linux_dep
- run:
name: Install Dependencies
command: |
# disable crash coredump, so unittests fail fast
sudo systemctl stop apport.service
# install from github to get latest; install iopath first since fvcore depends on it
pip install --progress-bar off -U 'git+https://github.com/facebookresearch/iopath'
pip install --progress-bar off -U 'git+https://github.com/facebookresearch/fvcore'
# Don't use pytest-xdist: cuda tests are unstable under multi-process workers.
pip install --progress-bar off ninja opencv-python-headless pytest tensorboard pycocotools
pip install --progress-bar off torch==$PYTORCH_VERSION -f $PYTORCH_INDEX
if [[ "$TORCHVISION_VERSION" == "master" ]]; then
pip install git+https://github.com/pytorch/vision.git
else
pip install --progress-bar off torchvision==$TORCHVISION_VERSION -f $PYTORCH_INDEX
fi
python -c 'import torch; print("CUDA:", torch.cuda.is_available())'
gcc --version
install_detectron2: &install_detectron2
- run:
name: Install Detectron2
command: |
# Remove first, in case it's in the CI cache
pip uninstall -y detectron2
pip install --progress-bar off -e .[all]
python -m detectron2.utils.collect_env
./datasets/prepare_for_tests.sh
run_unittests: &run_unittests
- run:
name: Run Unit Tests
command: |
pytest -v --durations=15 tests # parallel causes some random failures
# -------------------------------------------------------------------------------------
# Jobs to run
# -------------------------------------------------------------------------------------
jobs:
linux_cpu_tests:
<<: *cpu
<<: *version_parameters
working_directory: ~/detectron2
steps:
- checkout
# Cache the venv directory that contains python, dependencies, and checkpoints
# Refresh the key when dependencies should be updated (e.g. when pytorch releases)
- restore_cache:
keys:
- cache-{{ arch }}-<< parameters.pytorch_version >>-{{ .Branch }}-20210827
- <<: *install_python
- <<: *install_linux_dep
- <<: *install_detectron2
- <<: *run_unittests
- save_cache:
paths:
- /opt/circleci/.pyenv
- ~/.torch
key: cache-{{ arch }}-<< parameters.pytorch_version >>-{{ .Branch }}-20210827
linux_gpu_tests:
<<: *gpu
<<: *version_parameters
working_directory: ~/detectron2
steps:
- checkout
- restore_cache:
keys:
- cache-{{ arch }}-<< parameters.pytorch_version >>-{{ .Branch }}-20210827
- <<: *install_python
- <<: *install_linux_dep
- <<: *install_detectron2
- <<: *run_unittests
- save_cache:
paths:
- /opt/circleci/.pyenv
- ~/.torch
key: cache-{{ arch }}-<< parameters.pytorch_version >>-{{ .Branch }}-20210827
windows_cpu_build:
<<: *windows_cpu
<<: *version_parameters
steps:
- <<: *add_ssh_keys
- checkout
- <<: *setup_venv_win
# Cache the env directory that contains dependencies
- restore_cache:
keys:
- cache-{{ arch }}-<< parameters.pytorch_version >>-{{ .Branch }}-20210404
- run:
name: Install Dependencies
command: |
pip install certifi --ignore-installed # required on windows to workaround some cert issue
pip install numpy cython # required on windows before pycocotools
pip install opencv-python-headless pytest-xdist pycocotools tensorboard
pip install -U git+https://github.com/facebookresearch/iopath
pip install -U git+https://github.com/facebookresearch/fvcore
pip install torch==$env:PYTORCH_VERSION torchvision==$env:TORCHVISION_VERSION -f $env:PYTORCH_INDEX
- save_cache:
paths:
- env
key: cache-{{ arch }}-<< parameters.pytorch_version >>-{{ .Branch }}-20210404
- <<: *install_detectron2
# TODO: unittest fails for now
workflows:
version: 2
regular_test:
jobs:
- linux_cpu_tests:
name: linux_cpu_tests_pytorch1.10
pytorch_version: '1.10.0+cpu'
torchvision_version: '0.11.1+cpu'
- linux_gpu_tests:
name: linux_gpu_tests_pytorch1.8
pytorch_version: '1.8.1+cu111'
torchvision_version: '0.9.1+cu111'
- linux_gpu_tests:
name: linux_gpu_tests_pytorch1.9
pytorch_version: '1.9+cu111'
torchvision_version: '0.10+cu111'
- linux_gpu_tests:
name: linux_gpu_tests_pytorch1.10
pytorch_version: '1.10+cu111'
torchvision_version: '0.11.1+cu111'
- linux_gpu_tests:
name: linux_gpu_tests_pytorch1.10_python39
pytorch_version: '1.10+cu111'
torchvision_version: '0.11.1+cu111'
python_version: '3.9.6'
- windows_cpu_build:
pytorch_version: '1.10+cpu'
torchvision_version: '0.11.1+cpu'
This diff is collapsed.
# This is an example .flake8 config, used when developing *Black* itself.
# Keep in sync with setup.cfg which is used for source packages.
[flake8]
ignore = W503, E203, E221, C901, C408, E741, C407, B017
max-line-length = 100
max-complexity = 18
select = B,C,E,F,W,T4,B9
exclude = build
per-file-ignores =
**/__init__.py:F401,F403,E402
**/configs/**.py:F401,E402
configs/**.py:F401,E402
**/tests/config/**.py:F401,E402
tests/config/**.py:F401,E402