ModelZoo / detr_pytorch, commit 0e92bd4d
Authored Dec 05, 2023 by chenych
First commit
Pipeline #658 failed in 0 seconds
Changes: 44 · Showing 4 changed files with 673 additions and 0 deletions (+673, -0)
util/box_ops.py     +88  -0
util/misc.py        +468 -0
util/plot_utils.py  +107 -0
val.sh              +10  -0
util/box_ops.py (new file, mode 100644)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Utilities for bounding box manipulation and GIoU.
"""
import torch
from torchvision.ops.boxes import box_area


def box_cxcywh_to_xyxy(x):
    x_c, y_c, w, h = x.unbind(-1)
    b = [(x_c - 0.5 * w), (y_c - 0.5 * h),
         (x_c + 0.5 * w), (y_c + 0.5 * h)]
    return torch.stack(b, dim=-1)


def box_xyxy_to_cxcywh(x):
    x0, y0, x1, y1 = x.unbind(-1)
    b = [(x0 + x1) / 2, (y0 + y1) / 2,
         (x1 - x0), (y1 - y0)]
    return torch.stack(b, dim=-1)


# modified from torchvision to also return the union
def box_iou(boxes1, boxes2):
    area1 = box_area(boxes1)
    area2 = box_area(boxes2)

    lt = torch.max(boxes1[:, None, :2], boxes2[:, :2])  # [N,M,2]
    rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])  # [N,M,2]

    wh = (rb - lt).clamp(min=0)  # [N,M,2]
    inter = wh[:, :, 0] * wh[:, :, 1]  # [N,M]

    union = area1[:, None] + area2 - inter

    iou = inter / union
    return iou, union


def generalized_box_iou(boxes1, boxes2):
    """
    Generalized IoU from https://giou.stanford.edu/

    The boxes should be in [x0, y0, x1, y1] format

    Returns a [N, M] pairwise matrix, where N = len(boxes1)
    and M = len(boxes2)
    """
    # degenerate boxes gives inf / nan results
    # so do an early check
    assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
    assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
    iou, union = box_iou(boxes1, boxes2)

    lt = torch.min(boxes1[:, None, :2], boxes2[:, :2])
    rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])

    wh = (rb - lt).clamp(min=0)  # [N,M,2]
    area = wh[:, :, 0] * wh[:, :, 1]

    return iou - (area - union) / area


def masks_to_boxes(masks):
    """Compute the bounding boxes around the provided masks

    The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions.

    Returns a [N, 4] tensors, with the boxes in xyxy format
    """
    if masks.numel() == 0:
        return torch.zeros((0, 4), device=masks.device)

    h, w = masks.shape[-2:]

    y = torch.arange(0, h, dtype=torch.float)
    x = torch.arange(0, w, dtype=torch.float)
    y, x = torch.meshgrid(y, x)

    x_mask = (masks * x.unsqueeze(0))
    x_max = x_mask.flatten(1).max(-1)[0]
    x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]

    y_mask = (masks * y.unsqueeze(0))
    y_max = y_mask.flatten(1).max(-1)[0]
    y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]

    return torch.stack([x_min, y_min, x_max, y_max], 1)
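The helpers above are pure tensor ops, so they can be exercised standalone. Below is a minimal usage sketch (not part of the commit; the box values are made up) that converts a pair of hypothetical boxes from (cx, cy, w, h) to (x0, y0, x1, y1) and computes their pairwise GIoU:

# Illustrative only, not part of the commit.
import torch
from util.box_ops import box_cxcywh_to_xyxy, generalized_box_iou

# Two predicted boxes and one target box in (cx, cy, w, h) format.
pred = torch.tensor([[0.50, 0.50, 0.40, 0.40],
                     [0.30, 0.30, 0.20, 0.20]])
target = torch.tensor([[0.45, 0.50, 0.40, 0.40]])

# generalized_box_iou expects (x0, y0, x1, y1), so convert first.
giou = generalized_box_iou(box_cxcywh_to_xyxy(pred),
                           box_cxcywh_to_xyxy(target))
print(giou)  # shape [2, 1], one GIoU value per (pred, target) pair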
util/misc.py (new file, mode 100644)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Misc functions, including distributed helpers.

Mostly copy-paste from torchvision references.
"""
import os
import subprocess
import time
from collections import defaultdict, deque
import datetime
import pickle
from packaging import version
from typing import Optional, List

import torch
import torch.distributed as dist
from torch import Tensor

# needed due to empty tensor bug in pytorch and torchvision 0.5
import torchvision
if version.parse(torchvision.__version__) < version.parse('0.7'):
    from torchvision.ops import _new_empty_tensor
    from torchvision.ops.misc import _output_size


class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """

    def __init__(self, window_size=20, fmt=None):
        if fmt is None:
            fmt = "{median:.4f} ({global_avg:.4f})"
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0
        self.fmt = fmt

    def update(self, value, n=1):
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        if not is_dist_avail_and_initialized():
            return
        t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(t)
        t = t.tolist()
        self.count = int(t[0])
        self.total = t[1]

    @property
    def median(self):
        d = torch.tensor(list(self.deque))
        return d.median().item()

    @property
    def avg(self):
        d = torch.tensor(list(self.deque), dtype=torch.float32)
        return d.mean().item()

    @property
    def global_avg(self):
        return self.total / self.count

    @property
    def max(self):
        return max(self.deque)

    @property
    def value(self):
        return self.deque[-1]

    def __str__(self):
        return self.fmt.format(
            median=self.median,
            avg=self.avg,
            global_avg=self.global_avg,
            max=self.max,
            value=self.value)


def all_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)
    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """
    world_size = get_world_size()
    if world_size == 1:
        return [data]

    # serialized to a Tensor
    buffer = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to("cuda")

    # obtain Tensor size of each rank
    local_size = torch.tensor([tensor.numel()], device="cuda")
    size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)

    # receiving Tensor from all ranks
    # we pad the tensor because torch all_gather does not support
    # gathering tensors of different shapes
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda"))
    if local_size != max_size:
        padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda")
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)

    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))

    return data_list


def reduce_dict(input_dict, average=True):
    """
    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    Reduce the values in the dictionary from all processes so that all processes
    have the averaged results. Returns a dict with the same fields as
    input_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        return input_dict
    with torch.no_grad():
        names = []
        values = []
        # sort the keys so that they are consistent across processes
        for k in sorted(input_dict.keys()):
            names.append(k)
            values.append(input_dict[k])
        values = torch.stack(values, dim=0)
        dist.all_reduce(values)
        if average:
            values /= world_size
        reduced_dict = {k: v for k, v in zip(names, values)}
    return reduced_dict


class MetricLogger(object):
    def __init__(self, delimiter="\t"):
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        for k, v in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)

    def __getattr__(self, attr):
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(
            type(self).__name__, attr))

    def __str__(self):
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(
                "{}: {}".format(name, str(meter))
            )
        return self.delimiter.join(loss_str)

    def synchronize_between_processes(self):
        for meter in self.meters.values():
            meter.synchronize_between_processes()

    def add_meter(self, name, meter):
        self.meters[name] = meter

    def log_every(self, iterable, print_freq, header=None):
        i = 0
        if not header:
            header = ''
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt='{avg:.4f}')
        data_time = SmoothedValue(fmt='{avg:.4f}')
        space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
        if torch.cuda.is_available():
            log_msg = self.delimiter.join([
                header,
                '[{0' + space_fmt + '}/{1}]',
                'eta: {eta}',
                '{meters}',
                'time: {time}',
                'data: {data}',
                'max mem: {memory:.0f}'
            ])
        else:
            log_msg = self.delimiter.join([
                header,
                '[{0' + space_fmt + '}/{1}]',
                'eta: {eta}',
                '{meters}',
                'time: {time}',
                'data: {data}'
            ])
        MB = 1024.0 * 1024.0
        for obj in iterable:
            data_time.update(time.time() - end)
            yield obj
            iter_time.update(time.time() - end)
            if i % print_freq == 0 or i == len(iterable) - 1:
                eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time),
                        memory=torch.cuda.max_memory_allocated() / MB))
                else:
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {} ({:.4f} s / it)'.format(
            header, total_time_str, total_time / len(iterable)))


def get_sha():
    cwd = os.path.dirname(os.path.abspath(__file__))

    def _run(command):
        return subprocess.check_output(command, cwd=cwd).decode('ascii').strip()
    sha = 'N/A'
    diff = "clean"
    branch = 'N/A'
    try:
        sha = _run(['git', 'rev-parse', 'HEAD'])
        subprocess.check_output(['git', 'diff'], cwd=cwd)
        diff = _run(['git', 'diff-index', 'HEAD'])
        diff = "has uncommited changes" if diff else "clean"
        branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
    except Exception:
        pass
    message = f"sha: {sha}, status: {diff}, branch: {branch}"
    return message


def collate_fn(batch):
    batch = list(zip(*batch))
    batch[0] = nested_tensor_from_tensor_list(batch[0])
    return tuple(batch)


def _max_by_axis(the_list):
    # type: (List[List[int]]) -> List[int]
    maxes = the_list[0]
    for sublist in the_list[1:]:
        for index, item in enumerate(sublist):
            maxes[index] = max(maxes[index], item)
    return maxes


class NestedTensor(object):
    def __init__(self, tensors, mask: Optional[Tensor]):
        self.tensors = tensors
        self.mask = mask

    def to(self, device):
        # type: (Device) -> NestedTensor # noqa
        cast_tensor = self.tensors.to(device)
        mask = self.mask
        if mask is not None:
            assert mask is not None
            cast_mask = mask.to(device)
        else:
            cast_mask = None
        return NestedTensor(cast_tensor, cast_mask)

    def decompose(self):
        return self.tensors, self.mask

    def __repr__(self):
        return str(self.tensors)


def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
    # TODO make this more general
    if tensor_list[0].ndim == 3:
        if torchvision._is_tracing():
            # nested_tensor_from_tensor_list() does not export well to ONNX
            # call _onnx_nested_tensor_from_tensor_list() instead
            return _onnx_nested_tensor_from_tensor_list(tensor_list)

        # TODO make it support different-sized images
        max_size = _max_by_axis([list(img.shape) for img in tensor_list])
        # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
        batch_shape = [len(tensor_list)] + max_size
        b, c, h, w = batch_shape
        dtype = tensor_list[0].dtype
        device = tensor_list[0].device
        tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
        mask = torch.ones((b, h, w), dtype=torch.bool, device=device)
        for img, pad_img, m in zip(tensor_list, tensor, mask):
            pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
            m[: img.shape[1], :img.shape[2]] = False
    else:
        raise ValueError('not supported')
    return NestedTensor(tensor, mask)


# _onnx_nested_tensor_from_tensor_list() is an implementation of
# nested_tensor_from_tensor_list() that is supported by ONNX tracing.
@torch.jit.unused
def _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor:
    max_size = []
    for i in range(tensor_list[0].dim()):
        max_size_i = torch.max(torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32)).to(torch.int64)
        max_size.append(max_size_i)
    max_size = tuple(max_size)

    # work around for
    # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
    # m[: img.shape[1], :img.shape[2]] = False
    # which is not yet supported in onnx
    padded_imgs = []
    padded_masks = []
    for img in tensor_list:
        padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))]
        padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0]))
        padded_imgs.append(padded_img)

        m = torch.zeros_like(img[0], dtype=torch.int, device=img.device)
        padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1)
        padded_masks.append(padded_mask.to(torch.bool))

    tensor = torch.stack(padded_imgs)
    mask = torch.stack(padded_masks)

    return NestedTensor(tensor, mask=mask)


def setup_for_distributed(is_master):
    """
    This function disables printing when not in master process
    """
    import builtins as __builtin__
    builtin_print = __builtin__.print

    def print(*args, **kwargs):
        force = kwargs.pop('force', False)
        if is_master or force:
            builtin_print(*args, **kwargs)

    __builtin__.print = print


def is_dist_avail_and_initialized():
    if not dist.is_available():
        return False
    if not dist.is_initialized():
        return False
    return True


def get_world_size():
    if not is_dist_avail_and_initialized():
        return 1
    return dist.get_world_size()


def get_rank():
    if not is_dist_avail_and_initialized():
        return 0
    return dist.get_rank()


def is_main_process():
    return get_rank() == 0


def save_on_master(*args, **kwargs):
    if is_main_process():
        torch.save(*args, **kwargs)


def init_distributed_mode(args):
    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    elif 'SLURM_PROCID' in os.environ:
        args.rank = int(os.environ['SLURM_PROCID'])
        args.gpu = args.rank % torch.cuda.device_count()
    else:
        print('Not using distributed mode')
        args.distributed = False
        return

    args.distributed = True

    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}'.format(
        args.rank, args.dist_url), flush=True)
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                         world_size=args.world_size, rank=args.rank)
    torch.distributed.barrier()
    setup_for_distributed(args.rank == 0)


@torch.no_grad()
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k"""
    if target.numel() == 0:
        return [torch.zeros([], device=output.device)]
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].view(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res


def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None):
    # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor
    """
    Equivalent to nn.functional.interpolate, but with support for empty batch sizes.
    This will eventually be supported natively by PyTorch, and this
    class can go away.
    """
    if version.parse(torchvision.__version__) < version.parse('0.7'):
        if input.numel() > 0:
            return torch.nn.functional.interpolate(
                input, size, scale_factor, mode, align_corners)

        output_shape = _output_size(2, input, size, scale_factor)
        output_shape = list(input.shape[:-2]) + list(output_shape)
        return _new_empty_tensor(input, output_shape)
    else:
        return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)
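As a quick illustration of the two pieces of this module the training loop leans on most, here is a short sketch (not part of the commit; the image sizes and the toy loop are made up) that batches two variable-sized images into a NestedTensor and drives a MetricLogger:

# Illustrative only, not part of the commit.
import torch
from util.misc import nested_tensor_from_tensor_list, MetricLogger

# Two images of different spatial size; they get zero-padded to a common shape.
imgs = [torch.rand(3, 480, 640), torch.rand(3, 512, 512)]
samples = nested_tensor_from_tensor_list(imgs)
tensors, mask = samples.decompose()
print(tensors.shape)  # torch.Size([2, 3, 512, 640]), padded to the max H and W
print(mask.shape)     # torch.Size([2, 512, 640]), True where padding was added

# MetricLogger wraps each named value in a SmoothedValue and prints periodically.
logger = MetricLogger(delimiter="  ")
for step in logger.log_every(range(10), print_freq=5, header='Demo:'):
    logger.update(loss=torch.rand(1))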
util/plot_utils.py (new file, mode 100644)
"""
Plotting utilities to visualize training logs.
"""
import
torch
import
pandas
as
pd
import
numpy
as
np
import
seaborn
as
sns
import
matplotlib.pyplot
as
plt
from
pathlib
import
Path
,
PurePath
def
plot_logs
(
logs
,
fields
=
(
'class_error'
,
'loss_bbox_unscaled'
,
'mAP'
),
ewm_col
=
0
,
log_name
=
'log.txt'
):
'''
Function to plot specific fields from training log(s). Plots both training and test results.
:: Inputs - logs = list containing Path objects, each pointing to individual dir with a log file
- fields = which results to plot from each log file - plots both training and test for each field.
- ewm_col = optional, which column to use as the exponential weighted smoothing of the plots
- log_name = optional, name of log file if different than default 'log.txt'.
:: Outputs - matplotlib plots of results in fields, color coded for each log file.
- solid lines are training results, dashed lines are test results.
'''
func_name
=
"plot_utils.py::plot_logs"
# verify logs is a list of Paths (list[Paths]) or single Pathlib object Path,
# convert single Path to list to avoid 'not iterable' error
if
not
isinstance
(
logs
,
list
):
if
isinstance
(
logs
,
PurePath
):
logs
=
[
logs
]
print
(
f
"
{
func_name
}
info: logs param expects a list argument, converted to list[Path]."
)
else
:
raise
ValueError
(
f
"
{
func_name
}
- invalid argument for logs parameter.
\n
\
Expect list[Path] or single Path obj, received
{
type
(
logs
)
}
"
)
# Quality checks - verify valid dir(s), that every item in list is Path object, and that log_name exists in each dir
for
i
,
dir
in
enumerate
(
logs
):
if
not
isinstance
(
dir
,
PurePath
):
raise
ValueError
(
f
"
{
func_name
}
- non-Path object in logs argument of
{
type
(
dir
)
}
:
\n
{
dir
}
"
)
if
not
dir
.
exists
():
raise
ValueError
(
f
"
{
func_name
}
- invalid directory in logs argument:
\n
{
dir
}
"
)
# verify log_name exists
fn
=
Path
(
dir
/
log_name
)
if
not
fn
.
exists
():
print
(
f
"-> missing
{
log_name
}
. Have you gotten to Epoch 1 in training?"
)
print
(
f
"--> full path of missing log file:
{
fn
}
"
)
return
# load log file(s) and plot
dfs
=
[
pd
.
read_json
(
Path
(
p
)
/
log_name
,
lines
=
True
)
for
p
in
logs
]
fig
,
axs
=
plt
.
subplots
(
ncols
=
len
(
fields
),
figsize
=
(
16
,
5
))
for
df
,
color
in
zip
(
dfs
,
sns
.
color_palette
(
n_colors
=
len
(
logs
))):
for
j
,
field
in
enumerate
(
fields
):
if
field
==
'mAP'
:
coco_eval
=
pd
.
DataFrame
(
np
.
stack
(
df
.
test_coco_eval_bbox
.
dropna
().
values
)[:,
1
]
).
ewm
(
com
=
ewm_col
).
mean
()
axs
[
j
].
plot
(
coco_eval
,
c
=
color
)
else
:
df
.
interpolate
().
ewm
(
com
=
ewm_col
).
mean
().
plot
(
y
=
[
f
'train_
{
field
}
'
,
f
'test_
{
field
}
'
],
ax
=
axs
[
j
],
color
=
[
color
]
*
2
,
style
=
[
'-'
,
'--'
]
)
for
ax
,
field
in
zip
(
axs
,
fields
):
ax
.
legend
([
Path
(
p
).
name
for
p
in
logs
])
ax
.
set_title
(
field
)
def
plot_precision_recall
(
files
,
naming_scheme
=
'iter'
):
if
naming_scheme
==
'exp_id'
:
# name becomes exp_id
names
=
[
f
.
parts
[
-
3
]
for
f
in
files
]
elif
naming_scheme
==
'iter'
:
names
=
[
f
.
stem
for
f
in
files
]
else
:
raise
ValueError
(
f
'not supported
{
naming_scheme
}
'
)
fig
,
axs
=
plt
.
subplots
(
ncols
=
2
,
figsize
=
(
16
,
5
))
for
f
,
color
,
name
in
zip
(
files
,
sns
.
color_palette
(
"Blues"
,
n_colors
=
len
(
files
)),
names
):
data
=
torch
.
load
(
f
)
# precision is n_iou, n_points, n_cat, n_area, max_det
precision
=
data
[
'precision'
]
recall
=
data
[
'params'
].
recThrs
scores
=
data
[
'scores'
]
# take precision for all classes, all areas and 100 detections
precision
=
precision
[
0
,
:,
:,
0
,
-
1
].
mean
(
1
)
scores
=
scores
[
0
,
:,
:,
0
,
-
1
].
mean
(
1
)
prec
=
precision
.
mean
()
rec
=
data
[
'recall'
][
0
,
:,
0
,
-
1
].
mean
()
print
(
f
'
{
naming_scheme
}
{
name
}
: mAP@50=
{
prec
*
100
:
05.1
f
}
, '
+
f
'score=
{
scores
.
mean
():
0.3
f
}
, '
+
f
'f1=
{
2
*
prec
*
rec
/
(
prec
+
rec
+
1e-8
):
0.3
f
}
'
)
axs
[
0
].
plot
(
recall
,
precision
,
c
=
color
)
axs
[
1
].
plot
(
recall
,
scores
,
c
=
color
)
axs
[
0
].
set_title
(
'Precision / Recall'
)
axs
[
0
].
legend
(
names
)
axs
[
1
].
set_title
(
'Scores / Recall'
)
axs
[
1
].
legend
(
names
)
return
fig
,
axs
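For reference, a call to plot_logs might look like the sketch below. This is not part of the commit: the output directories are hypothetical, and the chosen fields must match columns that actually appear in your log.txt (the DETR training loop writes keys such as train_loss / test_loss and test_coco_eval_bbox).

# Illustrative only, not part of the commit; directory names are placeholders.
from pathlib import Path
import matplotlib.pyplot as plt
from util.plot_utils import plot_logs

log_dirs = [Path('outputs/run_baseline'), Path('outputs/run_finetune')]
plot_logs(log_dirs, fields=('loss', 'mAP'), ewm_col=5)  # solid = train, dashed = test
plt.show()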
val.sh (new file, mode 100644)
#!/bin/bash
echo "Testing start ..."

export HIP_VISIBLE_DEVICES=1
export HSA_FORCE_FINE_GRAIN_PCIE=1
export USE_MIOPEN_BATCHNORM=1

# resume: path of the model checkpoint to evaluate
# coco_path: path of the dataset (data must be in COCO format)
python main.py --batch_size 2 --no_aux_loss --eval --resume /path/of/model --coco_path /path/of/coco_data
# python main.py --batch_size 2 --no_aux_loss --eval --resume https://dl.fbaipublicfiles.com/detr/detr-r50-e632da11.pth --coco_path /home/datasets/COCO2017
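The script selects a single ROCm GPU via HIP_VISIBLE_DEVICES and runs main.py in evaluation-only mode. If it is more convenient to drive the same evaluation from Python, a rough equivalent is sketched below (not part of the commit; the paths are placeholders, exactly as in the script):

# Illustrative only, not part of the commit.
import os
import subprocess

env = dict(os.environ,
           HIP_VISIBLE_DEVICES="1",        # pick the second ROCm device
           HSA_FORCE_FINE_GRAIN_PCIE="1",
           USE_MIOPEN_BATCHNORM="1")

subprocess.run(
    ["python", "main.py",
     "--batch_size", "2",
     "--no_aux_loss",
     "--eval",
     "--resume", "/path/of/model",
     "--coco_path", "/path/of/coco_data"],
    env=env,
    check=True,
)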