Commit ac24a62a authored by mibaumgartner's avatar mibaumgartner
Browse files

pt 1.9 support and hydra 1.1.0 req

parent e6f7e946
......@@ -46,11 +46,9 @@ When running a training inside the container it is necessary to [increase the sh
## Source
1. Install CUDA (>10.1) and cudnn (make sure to select [compatible versions](https://docs.nvidia.com/deeplearning/cudnn/support-matrix/index.html)!)
2. [Optional] Depending on your GPU you might need to set `TORCH_CUDA_ARCH_LIST`, check [compute capabilities](https://developer.nvidia.com/cuda-gpus) here.
3. Install [torch](https://pytorch.org/) (make sure to match the pytorch and CUDA versions!) (requires pytorch >1.7+)
4. Install [torchvision](https://github.com/pytorch/vision) (make sure to match the versions!)
5. Clone nnDetection, `cd [path_to_repo]` and `pip install -e .`
6. Upgrade hydra to next release: `pip install hydra-core --upgrade --pre`
7. Set environment variables (more info can be found below):
3. Install [torch](https://pytorch.org/) (make sure to match the pytorch and CUDA versions!) (requires pytorch 1.7+) and [torchvision](https://github.com/pytorch/vision) (make sure to match the versions!).
4. Clone nnDetection, `cd [path_to_repo]` and `pip install -e .`
5. Set environment variables (more info can be found below):
- `det_data`: [required] Path to the source directory where all the data will be located
- `det_models`: [required] Path to directory where all models will be saved
- `OMP_NUM_THREADS=1` : [required] Needs to be set! Otherwise bad things will happen... Refer to batchgenerators documentation.
......
......@@ -94,7 +94,7 @@ def compute_anchors_for_strides(anchors: torch.Tensor,
return anchors_with_stride
class AnchorGenerator2D(AnchorGenerator):
class AnchorGenerator2D(torch.nn.Module):
def __init__(self, sizes: Sequence[Union[int, Sequence[int]]] = (128, 256, 512),
aspect_ratios: Sequence[Union[float, Sequence[float]]] = (0.5, 1.0, 2.0),
**kwargs):
......@@ -109,7 +109,18 @@ class AnchorGenerator2D(AnchorGenerator):
height/width, e.g. (0.5, 1, 2). if Seq[Seq] is provided, it should have
the same length as sizes
"""
super().__init__(sizes=sizes, aspect_ratios=aspect_ratios)
super().__init__()
if not isinstance(sizes[0], (list, tuple)):
sizes = tuple((s,) for s in sizes)
if not isinstance(aspect_ratios[0], (list, tuple)):
aspect_ratios = (aspect_ratios,) * len(sizes)
assert len(sizes) == len(aspect_ratios)
self.sizes = sizes
self.aspect_ratios = aspect_ratios
self.cell_anchors = None
self._cache = {}
self.num_anchors_per_level: List[int] = None
if kwargs:
logger.info(f"Discarding anchor generator kwargs {kwargs}")
......
......@@ -5,7 +5,8 @@ import re
import numpy as np
from torch import Tensor
from torch._six import container_abcs, string_classes, int_classes
from collections import abc
from torch._six import string_classes
from typing import Sequence, Union, Any, Mapping, Callable, List
np_str_obj_array_pattern = re.compile(r'[SaUO]')
......@@ -129,11 +130,11 @@ def to_tensor(inp: Any) -> Any:
and np_str_obj_array_pattern.search(inp.dtype.str) is not None:
return inp
return torch.as_tensor(inp)
elif isinstance(inp, container_abcs.Mapping):
elif isinstance(inp, abc.Mapping):
return {key: to_tensor(inp[key]) for key in inp}
elif isinstance(inp, tuple) and hasattr(inp, '_fields'): # namedtuple
return elem_type(*(to_tensor(d) for d in inp))
elif isinstance(inp, container_abcs.Sequence) and not isinstance(inp, string_classes):
elif isinstance(inp, abc.Sequence) and not isinstance(inp, string_classes):
return [to_tensor(d) for d in inp]
else:
return inp
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment