ModelZoo / ResNet50_tensorflow

Commit a827c123, authored Oct 01, 2021 by Vishnu Banna
yolo input test
Parent: 49abb4ac
Showing 4 changed files with 122 additions and 20 deletions (+122, -20)
official/vision/beta/projects/yolo/configs/experiments/yolov4-csp/inference/640.yaml  (+6, -4)
official/vision/beta/projects/yolo/dataloaders/yolo_input_test.py  (+91, -0)
official/vision/beta/projects/yolo/modeling/yolo_model.py  (+1, -0)
official/vision/beta/projects/yolo/optimization/sgd_torch.py  (+24, -16)
official/vision/beta/projects/yolo/configs/experiments/yolov4-csp/inference/640.yaml

@@ -27,9 +27,11 @@ task:
         'all': 2.0
       max_boxes: 300
       nms_type: greedy
-      iou_thresh: 0.25
-      nms_thresh: 0.45
-      pre_nms_points: 500
+      # iou_thresh: 0.25
+      # nms_thresh: 0.45
+      iou_thresh: 0.001
+      nms_thresh: 0.60
+      pre_nms_points: 5000
     loss:
       use_scaled_loss: true
       update_on_repeat: true
@@ -70,5 +72,5 @@ task:
       aug_rand_translate: 0.1
       area_thresh: 0.1
   validation_data:
-    global_batch_size: 1
+    global_batch_size: 8
     input_path: '/media/vbanna/DATA_SHARE/CV/datasets/COCO_raw/records/val*'
\ No newline at end of file
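
To see the effect of these overrides programmatically, the config can be parsed the same way the new input test below does. This is only a minimal sketch: the `scaled_yolo` experiment name and the `train_utils` helpers come from the test added in this commit, and loading this inference YAML as the override file is an assumption.

# Sketch: parse the updated inference config and inspect the overridden fields.
import pprint

from official.vision.beta.projects.yolo.common import registry_imports  # pylint: disable=unused-import
from official.core import train_utils

config = train_utils.ParseConfigOptions(
    experiment="scaled_yolo",
    config_file=[
        "official/vision/beta/projects/yolo/configs/experiments/"
        "yolov4-csp/inference/640.yaml"
    ])
params = train_utils.parse_configuration(config)

# Dumping the task config as a dict avoids guessing exact attribute names for
# the NMS fields edited above (iou_thresh, nms_thresh, pre_nms_points).
pprint.pprint(params.task.as_dict())
print(params.task.validation_data.global_batch_size)  # expected: 8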
official/vision/beta/projects/yolo/dataloaders/yolo_input_test.py (new file, mode 100755)
from official.vision.beta.projects.yolo.common import registry_imports  # pylint: disable=unused-import
from official.vision.beta.projects.yolo.tasks import image_classification as imc
from official.vision.beta.projects.yolo.configs import darknet_classification as dcfg

import os
import tensorflow as tf

from official.core import train_utils
from official.core import task_factory
from absl.testing import parameterized

PATH_TO_COCO = '/media/vbanna/DATA_SHARE/CV/datasets/COCO_raw/records/'


def test_yolo_input_task(scaled_pipeline=False, batch_size=1):
  if not scaled_pipeline:
    experiment = "yolo_darknet"
    config_path = [
        "official/vision/beta/projects/yolo/configs/experiments/yolov4/tpu/512.yaml"
    ]
  else:
    experiment = "scaled_yolo"
    config_path = [
        "official/vision/beta/projects/yolo/configs/experiments/yolov4-csp/tpu/640.yaml"
    ]

  config = train_utils.ParseConfigOptions(
      experiment=experiment, config_file=config_path)
  params = train_utils.parse_configuration(config)

  config = params.task
  task = task_factory.get_task(params.task)

  config.train_data.global_batch_size = batch_size
  config.validation_data.global_batch_size = 1
  config.train_data.dtype = 'float32'
  config.validation_data.dtype = 'float32'
  config.validation_data.shuffle_buffer_size = 1
  config.train_data.shuffle_buffer_size = 1
  config.train_data.input_path = os.path.join(PATH_TO_COCO, 'train*')
  config.validation_data.input_path = os.path.join(PATH_TO_COCO, 'val*')

  with tf.device('/CPU:0'):
    train_data = task.build_inputs(config.train_data)
    test_data = task.build_inputs(config.validation_data)
  return train_data, test_data, config


def test_yolo_pipeline_visually(is_training=True, num=30):
  # Visualize the data pipeline.
  import matplotlib.pyplot as plt

  dataset, testing, _ = test_yolo_input_task()
  data = dataset if is_training else testing
  data = data.take(num)

  for l, (image, label) in enumerate(data):
    image = tf.image.draw_bounding_boxes(image, label['bbox'], [[1.0, 0.0, 1.0]])
    gt = label['true_conf']
    obj3 = tf.clip_by_value(gt['3'][..., 0], 0.0, 1.0)
    obj4 = tf.clip_by_value(gt['4'][..., 0], 0.0, 1.0)
    obj5 = tf.clip_by_value(gt['5'][..., 0], 0.0, 1.0)

    for shind in range(1):
      fig, axe = plt.subplots(1, 4)

      image = image[shind]

      axe[0].imshow(image)
      axe[1].imshow(obj3[shind].numpy())
      axe[2].imshow(obj4[shind].numpy())
      axe[3].imshow(obj5[shind].numpy())

      fig.set_size_inches(18.5, 6.5, forward=True)
      plt.tight_layout()
      plt.show()


class YoloDetectionInputTest(tf.test.TestCase, parameterized.TestCase):

  @parameterized.named_parameters(('scaled', True), ('darknet', False))
  def test_yolo_input(self, scaled_pipeline):
    # Builds a pipeline from the config and tests the data pipeline shapes.
    dataset, _, params = test_yolo_input_task(
        scaled_pipeline=scaled_pipeline, batch_size=1)

    dataset = dataset.take(1)
    for image, label in dataset:
      self.assertAllEqual(image.shape, ([1] + params.model.input_size))
      self.assertTrue(
          tf.reduce_all(tf.math.logical_and(image >= 0, image <= 1)))


if __name__ == '__main__':
  tf.test.main()
  # test_yolo_pipeline_visually(is_training=True, num=20)
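
For a quick interactive check outside the unittest runner, the helper above can also be called directly. A minimal sketch, assuming the COCO TFRecords referenced by `PATH_TO_COCO` exist locally; the printed keys and value range follow from the assertions and label usage in the file above.

# Sketch: pull one training batch from the pipeline built by the helper above.
train_data, test_data, config = test_yolo_input_task(
    scaled_pipeline=True, batch_size=2)

for image, label in train_data.take(1):
  print(image.shape)           # [2] + config.model.input_size, values in [0, 1]
  print(sorted(label.keys()))  # includes 'bbox' and 'true_conf' per the code above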
official/vision/beta/projects/yolo/modeling/yolo_model.py

@@ -113,6 +113,7 @@ class Yolo(tf.keras.Model):
     return weights, bias, other

   def fuse(self):
+    """Fuses all Convolution and Batchnorm layers to get better latency."""
     print("Fusing Conv Batch Norm Layers.")
     if not self._fused:
       self._fused = True
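
The docstring added to `fuse()` refers to the standard conv + batch-norm folding trick. The hunk does not show the implementation itself, so the snippet below is only a generic, hypothetical illustration of the arithmetic (fold the BN scale into the conv kernel and the BN shift into the bias); it is not the model garden code.

import numpy as np


def fuse_conv_bn(kernel, bias, gamma, beta, mean, var, eps=1e-3):
  """Generic conv + batch-norm folding (illustration only, not the repo's code).

  kernel: [kh, kw, cin, cout] conv weights; bias: [cout] (zeros if the conv has
  no bias); gamma, beta, mean, var: [cout] batch-norm parameters.
  """
  scale = gamma / np.sqrt(var + eps)           # per-output-channel BN scale
  fused_kernel = kernel * scale                # broadcasts over the last axis
  fused_bias = beta + (bias - mean) * scale    # fold the BN shift into the bias
  return fused_kernel, fused_bias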
official/vision/beta/projects/yolo/optimization/sgd_torch.py

@@ -30,27 +30,35 @@ def _var_key(var):

 class SGDTorch(tf.keras.optimizers.Optimizer):
-  """Optimizer that computes an exponential moving average of the variables.
-
-  Empirically it has been found that using the moving average of the trained
-  parameters of a deep network is better than using its trained parameters
-  directly. This optimizer allows you to compute this moving average and swap
-  the variables at save time so that any code outside of the training loop
-  will use by default the average values instead of the original ones.
+  """Optimizer that simulates the SGD module used in pytorch.
+
+  For details on the differences between the original SGD implementation and
+  the one in pytorch, see
+  https://pytorch.org/docs/stable/generated/torch.optim.SGD.html.
+
+  This optimizer also allows for the use of a momentum warmup alongside a
+  learning rate warmup, though using this is not required.

   Example of usage for training:
   ```python
-  opt = tf.keras.optimizers.SGD(learning_rate)
-  opt = ExponentialMovingAverage(opt)
-  opt.shadow_copy(model)
-  ```
-
-  At test time, swap the shadow variables to evaluate on the averaged weights:
-  ```python
-  opt.swap_weights()
-  # Test eval the model here
-  opt.swap_weights()
+  opt = SGDTorch(learning_rate)
+
+  # Models must implement a method to iterate all model.trainable_variables
+  # and split the variables by key into the weights, biases, and others.
+  # Weight decay will be applied to all variables in the weights group. Biases
+  # and others are included as a way to provide alternate LR schedules to
+  # various parameter groups. An example of this variable search can be found
+  # in official/vision/beta/projects/yolo/modeling/yolo_model.py.
+  weights, biases, other = model.get_groups()
+  opt.set_params(weights, biases, other)
+
+  # Set this if the learning rate schedule for the biases differs; if lr is
+  # not set, the default schedule used for the weights is used on the biases.
+  opt.set_bias_lr(<lr schedule>)
+
+  # Set this if the learning rate schedule for the other variables differs; if
+  # lr is not set, the default schedule used for the weights is used on them.
+  opt.set_other_lr(<lr schedule>)
   ```
   """
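
Pulled together, the workflow the new docstring describes looks roughly like the sketch below. It assumes a model exposing `get_groups()` (as `yolo_model.py` does), that `SGDTorch` takes the learning rate as its first argument as in the docstring example, and uses `tf.keras` cosine-decay schedules purely as placeholders.

# Sketch of the docstring workflow above; schedule values are placeholders.
import tensorflow as tf

from official.vision.beta.projects.yolo.optimization.sgd_torch import SGDTorch


def build_sgd_torch(model, base_lr=0.01):
  """Builds an SGDTorch optimizer with per-group learning rates (sketch)."""
  opt = SGDTorch(base_lr)

  # The model splits its trainable variables into weights / biases / others;
  # weight decay applies to the weights group only.
  weights, biases, other = model.get_groups()
  opt.set_params(weights, biases, other)

  # Optional: distinct schedules for biases and the "other" group; if unset,
  # the schedule used for the weights is reused.
  opt.set_bias_lr(tf.keras.optimizers.schedules.CosineDecay(base_lr, 10000))
  opt.set_other_lr(tf.keras.optimizers.schedules.CosineDecay(base_lr, 10000))
  return opt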