Commit 08ec7955 authored by anivegesana

Remove dead files

parent 230732b2
runtime:
  distribution_strategy: 'mirrored'
  mixed_precision_dtype: 'float16'
  loss_scale: 'dynamic'
  num_gpus: 2
task:
  annotation_file: Null
  init_checkpoint: Null
  model:
    num_classes: 80
    input_size: [640, 640, 3]
    min_level: 3
    max_level: 7
  losses:
    l2_weight_decay: 0.0001
  train_data:
    input_path: Null
    tfds_name: 'coco/2017'
    tfds_split: 'train'
    tfds_download: True
    is_training: True
    global_batch_size: 16
    dtype: 'float16'
    cycle_length: 5
    decoder:
      type: tfds_decoder
    shuffle_buffer_size: 2
  validation_data:
    input_path: Null
    tfds_name: 'coco/2017'
    tfds_split: 'validation'
    tfds_download: True
    # tfds_skip_decoding_feature: source_id,image,height,width,groundtruth_classes,groundtruth_is_crowd,groundtruth_area,groundtruth_boxes
    is_training: False
    global_batch_size: 16
    dtype: 'float16'
    cycle_length: 10
    decoder:
      type: tfds_decoder
    shuffle_buffer_size: 2
trainer:
  train_steps: 532224
  validation_steps: 1564
  validation_interval: 2000
  steps_per_loop: 200 #59136
  summary_interval: 200 #59136
  checkpoint_interval: 10000
  optimizer_config:
    optimizer:
      type: 'sgd'
      sgd:
        momentum: 0.9
    # learning_rate:
    #   type: 'cosine'
    #   cosine:
    #     initial_learning_rate: 0.0021875
    #     decay_steps: 4257792
    #     alpha: 0.01
    # Stepwise version
    learning_rate:
      type: 'stepwise'
      stepwise:
        # boundaries: [26334, 30954]
        boundaries: [421344, 495264]
        # values: [0.28, 0.028, 0.0028]
        values: [0.0175, 0.00175, 0.000175]
    warmup:
      type: 'linear'
      linear:
        warmup_steps: 20480
        warmup_learning_rate: 0.0001634375
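
For orientation, the schedule above is stepwise SGD decay with a linear warmup. With global_batch_size 16 on COCO train2017 (~118,287 images), one epoch is roughly 7,392 steps, so train_steps 532224 is about 72 epochs and the boundaries 421344 and 495264 land near epochs 57 and 67. A minimal sketch of the schedule in plain Keras APIs (an approximation of the Model Garden's optimizer factory, not the removed code itself):

import tensorflow as tf

# Stepwise decay exactly as configured above.
decay = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
    boundaries=[421344, 495264],
    values=[0.0175, 0.00175, 0.000175])

def learning_rate(step):
    # Hedged approximation of the linear warmup: ramp from
    # warmup_learning_rate up to the first decay value over warmup_steps,
    # then hand off to the stepwise schedule.
    if step < 20480:
        return 0.0001634375 + (0.0175 - 0.0001634375) * step / 20480
    return float(decay(step))
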
2020-10-30 19:06:27.235259: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudart.so.10.1
Traceback (most recent call last):
  File "/opt/conda/lib/python3.7/runpy.py", line 183, in _run_module_as_main
    mod_name, mod_spec, code = _get_module_details(mod_name, _Error)
  File "/opt/conda/lib/python3.7/runpy.py", line 109, in _get_module_details
    __import__(pkg_name)
  File "/home/vbanna/tf-models/official/vision/beta/__init__.py", line 18, in <module>
    from official.vision.beta import configs
  File "/home/vbanna/tf-models/official/vision/beta/configs/__init__.py", line 18, in <module>
    from official.vision.beta.configs import image_classification
  File "/home/vbanna/tf-models/official/vision/beta/configs/image_classification.py", line 20, in <module>
    from official.core import exp_factory
  File "/home/vbanna/tf-models/official/core/exp_factory.py", line 19, in <module>
    from official.modeling.hyperparams import config_definitions as cfg
  File "/home/vbanna/tf-models/official/modeling/hyperparams/config_definitions.py", line 23, in <module>
    from official.modeling.optimization.configs import optimization_config
  File "/home/vbanna/tf-models/official/modeling/optimization/__init__.py", line 8, in <module>
    from official.modeling.optimization.optimizer_factory import OptimizerFactory
  File "/home/vbanna/tf-models/official/modeling/optimization/optimizer_factory.py", line 21, in <module>
    import tensorflow_addons.optimizers as tfa_optimizers
ModuleNotFoundError: No module named 'tensorflow_addons'
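
This first run died on a missing dependency; installing it (pip install tensorflow-addons) resolves the import, as the successful run below shows. A hypothetical preflight check that fails fast instead of producing the deep import traceback above:

import importlib.util
import sys

# Check for the optional dependency before launching training.
if importlib.util.find_spec("tensorflow_addons") is None:
    sys.exit("Missing dependency: run `pip install tensorflow-addons` and retry.")
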
2020-10-30 19:08:43.755187: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcudart.so.10.1
I1030 19:08:45.768239 139885496419712 train_utils.py:106] Final experiment parameters: {'runtime': {'all_reduce_alg': None,
             'batchnorm_spatial_persistent': False,
             'dataset_num_private_threads': None,
             'default_shard_dim': -1,
             'distribution_strategy': 'mirrored',
             'enable_xla': False,
             'gpu_thread_mode': None,
             'loss_scale': 'dynamic',
             'mixed_precision_dtype': 'float16',
             'num_cores_per_replica': 1,
             'num_gpus': 2,
             'num_packs': 1,
             'per_gpu_thread_count': 0,
             'run_eagerly': False,
             'task_index': -1,
             'tpu': None,
             'worker_hosts': None},
 'task': {'annotation_file': None,
          'gradient_clip_norm': 0.0,
          'init_checkpoint': None,
          'init_checkpoint_modules': 'backbone',
          'losses': {'box_loss_weight': 50,
                     'focal_loss_alpha': 0.25,
                     'focal_loss_gamma': 1.5,
                     'huber_loss_delta': 0.1,
                     'l2_weight_decay': 0.0001},
          'model': {'anchor': {'anchor_size': 4.0,
                               'aspect_ratios': [0.5, 1.0, 2.0],
                               'num_scales': 3},
                    'backbone': {'resnet': {'model_id': 50}, 'type': 'resnet'},
                    'decoder': {'fpn': {'num_filters': 256,
                                        'use_separable_conv': False},
                                'type': 'fpn'},
                    'detection_generator': {'max_num_detections': 100,
                                            'nms_iou_threshold': 0.5,
                                            'pre_nms_score_threshold': 0.05,
                                            'pre_nms_top_k': 5000,
                                            'use_batched_nms': False},
                    'head': {'num_convs': 4,
                             'num_filters': 256,
                             'use_separable_conv': False},
                    'input_size': [640, 640, 3],
                    'max_level': 7,
                    'min_level': 3,
                    'norm_activation': {'activation': 'relu',
                                        'norm_epsilon': 0.001,
                                        'norm_momentum': 0.99,
                                        'use_sync_bn': False},
                    'num_classes': 80},
          'train_data': {'block_length': 1,
                         'cache': False,
                         'cycle_length': 5,
                         'decoder': {'tfds_decoder': {'regenerate_source_id': False},
                                     'type': 'tfds_decoder'},
                         'deterministic': None,
                         'drop_remainder': True,
                         'dtype': 'float16',
                         'enable_tf_data_service': False,
                         'global_batch_size': 16,
                         'input_path': None,
                         'is_training': True,
                         'parser': {'aug_rand_hflip': True,
                                    'aug_scale_max': 2.0,
                                    'aug_scale_min': 0.5,
                                    'match_threshold': 0.5,
                                    'max_num_instances': 100,
                                    'num_channels': 3,
                                    'skip_crowd_during_training': True,
                                    'unmatched_threshold': 0.5},
                         'sharding': True,
                         'shuffle_buffer_size': 2,
                         'tf_data_service_address': None,
                         'tf_data_service_job_name': None,
                         'tfds_as_supervised': False,
                         'tfds_data_dir': '',
                         'tfds_download': True,
                         'tfds_name': 'coco/2017',
                         'tfds_skip_decoding_feature': '',
                         'tfds_split': 'train'},
          'validation_data': {'block_length': 1,
                              'cache': False,
                              'cycle_length': 10,
                              'decoder': {'tfds_decoder': {'regenerate_source_id': False},
                                          'type': 'tfds_decoder'},
                              'deterministic': None,
                              'drop_remainder': True,
                              'dtype': 'float16',
                              'enable_tf_data_service': False,
                              'global_batch_size': 16,
                              'input_path': None,
                              'is_training': False,
                              'parser': {'aug_rand_hflip': False,
                                         'aug_scale_max': 1.0,
                                         'aug_scale_min': 1.0,
                                         'match_threshold': 0.5,
                                         'max_num_instances': 100,
                                         'num_channels': 3,
                                         'skip_crowd_during_training': True,
                                         'unmatched_threshold': 0.5},
                              'sharding': True,
                              'shuffle_buffer_size': 2,
                              'tf_data_service_address': None,
                              'tf_data_service_job_name': None,
                              'tfds_as_supervised': False,
                              'tfds_data_dir': '',
                              'tfds_download': True,
                              'tfds_name': 'coco/2017',
                              'tfds_skip_decoding_feature': '',
                              'tfds_split': 'validation'}},
 'trainer': {'allow_tpu_summary': False,
             'best_checkpoint_eval_metric': '',
             'best_checkpoint_export_subdir': '',
             'best_checkpoint_metric_comp': 'higher',
             'checkpoint_interval': 7392,
             'continuous_eval_timeout': 3600,
             'eval_tf_function': True,
             'max_to_keep': 5,
             'optimizer_config': {'ema': None,
                                  'learning_rate': {'stepwise': {'boundaries': [421344, 495264],
                                                                 'name': 'PiecewiseConstantDecay',
                                                                 'values': [0.0175, 0.00175, 0.000175]},
                                                    'type': 'stepwise'},
                                  'optimizer': {'sgd': {'clipnorm': None,
                                                        'clipvalue': None,
                                                        'decay': 0.0,
                                                        'momentum': 0.9,
                                                        'name': 'SGD',
                                                        'nesterov': False},
                                                'type': 'sgd'},
                                  'warmup': {'linear': {'name': 'linear',
                                                        'warmup_learning_rate': 0.0067,
                                                        'warmup_steps': 500},
                                             'type': 'linear'}},
             'steps_per_loop': 7392,
             'summary_interval': 7392,
             'train_steps': 532224,
             'train_tf_function': True,
             'train_tf_while_loop': True,
             'validation_interval': 2000,
             'validation_steps': 1564}}
I1030 19:08:45.768540 139885496419712 train_utils.py:115] Saving experiment configuration to training_dir/params.yaml
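The resolved parameter tree logged above is a plain nested dict of scalars, strings, and lists, so the saved params.yaml can be reproduced with a PyYAML dump. A rough sketch of that mechanism (a guess for illustration, not the Model Garden's actual serializer):

import yaml

def save_params(params, path="training_dir/params.yaml"):
    # params is the nested dict logged as "Final experiment parameters".
    with open(path, "w") as f:
        yaml.safe_dump(params, f, default_flow_style=False)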
2020-10-30 19:08:45.783558: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcuda.so.1
2020-10-30 19:08:46.766930: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:982] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2020-10-30 19:08:46.767979: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1716] Found device 0 with properties:
pciBusID: 0000:00:04.0 name: Tesla P100-PCIE-16GB computeCapability: 6.0
coreClock: 1.3285GHz coreCount: 56 deviceMemorySize: 15.90GiB deviceMemoryBandwidth: 681.88GiB/s
2020-10-30 19:08:46.768062: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:982] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2020-10-30 19:08:46.769061: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1716] Found device 1 with properties:
pciBusID: 0000:00:05.0 name: Tesla P100-PCIE-16GB computeCapability: 6.0
coreClock: 1.3285GHz coreCount: 56 deviceMemorySize: 15.90GiB deviceMemoryBandwidth: 681.88GiB/s
2020-10-30 19:08:46.769111: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcudart.so.10.1
2020-10-30 19:08:46.771428: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcublas.so.10
2020-10-30 19:08:46.773310: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcufft.so.10
2020-10-30 19:08:46.773611: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcurand.so.10
2020-10-30 19:08:46.775726: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcusolver.so.10
2020-10-30 19:08:46.776841: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcusparse.so.10
2020-10-30 19:08:46.781620: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcudnn.so.7
2020-10-30 19:08:46.781722: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:982] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2020-10-30 19:08:46.782745: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:982] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2020-10-30 19:08:46.783768: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:982] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2020-10-30 19:08:46.784781: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:982] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2020-10-30 19:08:46.785793: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1858] Adding visible gpu devices: 0, 1
2020-10-30 19:08:46.786167: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2020-10-30 19:08:46.796393: I tensorflow/core/platform/profile_utils/cpu_utils.cc:104] CPU Frequency: 2200000000 Hz
2020-10-30 19:08:46.797730: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x55b5dbc08730 initialized for platform Host (this does not guarantee that XLA will be used). Devices:
2020-10-30 19:08:46.797757: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): Host, Default Version
2020-10-30 19:08:46.996238: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:982] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2020-10-30 19:08:46.998987: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:982] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2020-10-30 19:08:47.000171: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x55b5dbc74b30 initialized for platform CUDA (this does not guarantee that XLA will be used). Devices:
2020-10-30 19:08:47.000199: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): Tesla P100-PCIE-16GB, Compute Capability 6.0
2020-10-30 19:08:47.000208: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (1): Tesla P100-PCIE-16GB, Compute Capability 6.0
2020-10-30 19:08:47.000736: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:982] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2020-10-30 19:08:47.001752: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1716] Found device 0 with properties:
pciBusID: 0000:00:04.0 name: Tesla P100-PCIE-16GB computeCapability: 6.0
coreClock: 1.3285GHz coreCount: 56 deviceMemorySize: 15.90GiB deviceMemoryBandwidth: 681.88GiB/s
2020-10-30 19:08:47.001840: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:982] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2020-10-30 19:08:47.002787: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1716] Found device 1 with properties:
pciBusID: 0000:00:05.0 name: Tesla P100-PCIE-16GB computeCapability: 6.0
coreClock: 1.3285GHz coreCount: 56 deviceMemorySize: 15.90GiB deviceMemoryBandwidth: 681.88GiB/s
2020-10-30 19:08:47.002819: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcudart.so.10.1
2020-10-30 19:08:47.002852: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcublas.so.10
2020-10-30 19:08:47.002872: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcufft.so.10
2020-10-30 19:08:47.002893: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcurand.so.10
2020-10-30 19:08:47.002912: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcusolver.so.10
2020-10-30 19:08:47.002942: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcusparse.so.10
2020-10-30 19:08:47.002963: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcudnn.so.7
2020-10-30 19:08:47.003028: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:982] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2020-10-30 19:08:47.004093: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:982] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2020-10-30 19:08:47.005124: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:982] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2020-10-30 19:08:47.006119: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:982] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2020-10-30 19:08:47.007080: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1858] Adding visible gpu devices: 0, 1
2020-10-30 19:08:47.007162: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcudart.so.10.1
2020-10-30 19:08:48.517733: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1257] Device interconnect StreamExecutor with strength 1 edge matrix:
2020-10-30 19:08:48.517799: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1263] 0 1
2020-10-30 19:08:48.517809: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1276] 0: N N
2020-10-30 19:08:48.517815: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1276] 1: N N
2020-10-30 19:08:48.518120: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:982] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2020-10-30 19:08:48.519212: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:982] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2020-10-30 19:08:48.520219: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:982] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2020-10-30 19:08:48.521275: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1402] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 14951 MB memory) -> physical GPU (device: 0, name: Tesla P100-PCIE-16GB, pci bus id: 0000:00:04.0, compute capability: 6.0)
2020-10-30 19:08:48.521948: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:982] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2020-10-30 19:08:48.522918: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1402] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:1 with 14951 MB memory) -> physical GPU (device: 1, name: Tesla P100-PCIE-16GB, pci bus id: 0000:00:05.0, compute capability: 6.0)
2020-10-30 19:08:48.829361: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:982] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2020-10-30 19:08:48.830101: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:982] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
WARNING:tensorflow:Mixed precision compatibility check (mixed_float16): WARNING
Your GPUs may run slowly with dtype policy mixed_float16 because they do not have compute capability of at least 7.0. Your GPUs:
Tesla P100-PCIE-16GB, compute capability 6.0 (x2)
See https://developer.nvidia.com/cuda-gpus for a list of GPUs and their compute capabilities.
If you will use compatible GPU(s) not attached to this host, e.g. by running a multi-worker model, you can ignore this warning. This message will only be logged once
W1030 19:08:48.833126 139885496419712 device_compatibility_check.py:111] Mixed precision compatibility check (mixed_float16): WARNING
Your GPUs may run slowly with dtype policy mixed_float16 because they do not have compute capability of at least 7.0. Your GPUs:
Tesla P100-PCIE-16GB, compute capability 6.0 (x2)
See https://developer.nvidia.com/cuda-gpus for a list of GPUs and their compute capabilities.
If you will use compatible GPU(s) not attached to this host, e.g. by running a multi-worker model, you can ignore this warning. This message will only be logged once
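The warning fires because both P100s report compute capability 6.0, below the 7.0 needed for fast float16 Tensor Core kernels, so mixed_float16 may actually slow this run down. A hedged sketch (assuming TF >= 2.4) of gating the policy on device capability rather than hard-coding it in the config:

import tensorflow as tf

# Enable mixed_float16 only when every visible GPU reports compute
# capability >= 7.0; otherwise stay in float32 so P100-class GPUs
# (capability 6.0, as in this log) are not penalized.
gpus = tf.config.list_physical_devices("GPU")
details = [tf.config.experimental.get_device_details(g) for g in gpus]
if gpus and all(d.get("compute_capability", (0, 0)) >= (7, 0) for d in details):
    tf.keras.mixed_precision.set_global_policy("mixed_float16")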
INFO:tensorflow:Using MirroredStrategy with devices ('/job:localhost/replica:0/task:0/device:GPU:0', '/job:localhost/replica:0/task:0/device:GPU:1')
I1030 19:08:48.835364 139885496419712 mirrored_strategy.py:341] Using MirroredStrategy with devices ('/job:localhost/replica:0/task:0/device:GPU:0', '/job:localhost/replica:0/task:0/device:GPU:1')
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I1030 19:08:49.179665 139885496419712 cross_device_ops.py:443] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I1030 19:08:49.185065 139885496419712 cross_device_ops.py:443] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I1030 19:08:49.191244 139885496419712 cross_device_ops.py:443] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I1030 19:08:49.196064 139885496419712 cross_device_ops.py:443] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I1030 19:08:49.220396 139885496419712 cross_device_ops.py:443] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I1030 19:08:49.224822 139885496419712 cross_device_ops.py:443] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I1030 19:08:49.384853 139885496419712 cross_device_ops.py:443] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I1030 19:08:49.389641 139885496419712 cross_device_ops.py:443] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I1030 19:08:49.395605 139885496419712 cross_device_ops.py:443] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I1030 19:08:49.400318 139885496419712 cross_device_ops.py:443] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
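These repeated reduce/broadcast lines come from variable creation under the two-GPU MirroredStrategy. A minimal sketch of the corresponding setup (assumed for illustration, not taken from the removed files):

import tensorflow as tf

# Two-replica in-graph replication; gradients are batch all-reduced with
# NCCL, matching the "batch_all_reduce ... algorithm = nccl, num_packs = 1"
# lines later in this log.
strategy = tf.distribute.MirroredStrategy(
    devices=["/gpu:0", "/gpu:1"],
    cross_device_ops=tf.distribute.NcclAllReduce(num_packs=1))
with strategy.scope():
    v = tf.Variable(1.0)  # variables created in scope are mirrored on both GPUs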
I1030 19:08:54.437170 139885496419712 train_lib.py:139] Not exporting the best checkpoint. data_dir: training_dir, export_subdir: , metric_name:
I1030 19:08:54.437393 139885496419712 train_utils.py:44] Running default trainer.
I1030 19:08:54.534491 139885496419712 dataset_info.py:361] Load dataset info from /home/vbanna/tensorflow_datasets/coco/2017/1.1.0
I1030 19:08:54.537066 139885496419712 dataset_builder.py:299] Reusing dataset coco (/home/vbanna/tensorflow_datasets/coco/2017/1.1.0)
I1030 19:08:54.537193 139885496419712 dataset_builder.py:511] Constructing tf.data.Dataset for split train, from /home/vbanna/tensorflow_datasets/coco/2017/1.1.0
I1030 19:08:59.088062 139885496419712 dataset_info.py:361] Load dataset info from /home/vbanna/tensorflow_datasets/coco/2017/1.1.0
I1030 19:08:59.090812 139885496419712 dataset_builder.py:299] Reusing dataset coco (/home/vbanna/tensorflow_datasets/coco/2017/1.1.0)
I1030 19:08:59.090951 139885496419712 dataset_builder.py:511] Constructing tf.data.Dataset for split validation, from /home/vbanna/tensorflow_datasets/coco/2017/1.1.0
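The dataset lines above correspond to standard TFDS calls reusing the already-prepared coco/2017 (version 1.1.0) data; roughly, with the data_dir shown in this log:

import tensorflow_datasets as tfds

# Builds tf.data pipelines from the prepared dataset on disk; no re-download
# happens because the builder finds coco/2017/1.1.0 under data_dir.
train_ds = tfds.load("coco/2017", split="train",
                     data_dir="/home/vbanna/tensorflow_datasets")
val_ds = tfds.load("coco/2017", split="validation",
                   data_dir="/home/vbanna/tensorflow_datasets")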
I1030 19:09:00.456362 139885496419712 train_lib.py:206] Starts to execute mode: train_and_eval
I1030 19:09:00.457955 139885496419712 controller.py:167] Train at step 0 of 2000
I1030 19:09:00.459034 139885496419712 controller.py:334] Entering training loop at step 0 to run 2000 steps
INFO:tensorflow:batch_all_reduce: 285 all-reduces with algorithm = nccl, num_packs = 1
I1030 19:09:12.923888 139885496419712 cross_device_ops.py:702] batch_all_reduce: 285 all-reduces with algorithm = nccl, num_packs = 1
INFO:tensorflow:batch_all_reduce: 285 all-reduces with algorithm = nccl, num_packs = 1
I1030 19:09:33.195622 139885496419712 cross_device_ops.py:702] batch_all_reduce: 285 all-reduces with algorithm = nccl, num_packs = 1
2020-10-30 19:10:08.985502: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcudnn.so.7
2020-10-30 19:10:10.958205: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcublas.so.10
I1030 19:31:48.572363 139885496419712 controller.py:32] step: 2000 steps_per_second: 1.46 {'total_loss': 2.952513, 'cls_loss': 1.0308509, 'box_loss': 0.009998905, 'model_loss': 1.5307966, 'training_loss': 2.952513, 'learning_rate': 0.0175}
I1030 19:31:49.536207 139885496419712 controller.py:381] Saved checkpoints in training_dir/ckpt-2000
I1030 19:31:49.537605 139885496419712 controller.py:201] Running 1564 steps of evaluation at train step: 2000
I1030 20:03:47.641623 139885496419712 coco_evaluator.py:128] There is no annotation_file in COCOEvaluator.
I1030 20:06:02.119477 139885496419712 controller.py:32] step: 2000 evaluation metric: {'total_loss': 2.7545106, 'cls_loss': 0.9337675, 'box_loss': 0.0088655995, 'model_loss': 1.3770468, 'validation_loss': 2.7545106, 'AP': 0.0066075507, 'AP50': 0.016931148, 'AP75': 0.0040874337, 'APs': 0.00055430405, 'APm': 0.004762264, 'APl': 0.009988218, 'ARmax1': 0.04598566, 'ARmax10': 0.079140015, 'ARmax100': 0.08211258, 'ARs': 0.003963818, 'ARm': 0.03661192, 'ARl': 0.13360435}
I1030 20:06:02.146182 139885496419712 controller.py:167] Train at step 2000 of 4000
I1030 20:06:02.146733 139885496419712 controller.py:334] Entering training loop at step 2000 to run 2000 steps
I1030 20:27:36.706292 139885496419712 controller.py:32] step: 4000 steps_per_second: 0.60 {'total_loss': 2.6101823, 'cls_loss': 0.8718026, 'box_loss': 0.008097591, 'model_loss': 1.2766824, 'training_loss': 2.6101823, 'learning_rate': 0.0175}
I1030 20:27:36.717345 139885496419712 controller.py:201] Running 1564 steps of evaluation at train step: 4000
I1030 20:57:40.566221 139885496419712 coco_evaluator.py:128] There is no annotation_file in COCOEvaluator.
I1030 21:00:01.935145 139885496419712 controller.py:32] step: 4000 evaluation metric: {'total_loss': 2.5162745, 'cls_loss': 0.8355965, 'box_loss': 0.0078043677, 'model_loss': 1.225815, 'validation_loss': 2.5162745, 'AP': 0.021224046, 'AP50': 0.044965032, 'AP75': 0.017090354, 'APs': 0.0025279159, 'APm': 0.015290717, 'APl': 0.03299058, 'ARmax1': 0.07679166, 'ARmax10': 0.13622928, 'ARmax100': 0.14035718, 'ARs': 0.013522613, 'ARm': 0.09343928, 'ARl': 0.222368}
I1030 21:00:01.974603 139885496419712 controller.py:167] Train at step 4000 of 6000
I1030 21:00:01.975189 139885496419712 controller.py:334] Entering training loop at step 4000 to run 2000 steps
I1030 21:21:35.986476 139885496419712 controller.py:32] step: 6000 steps_per_second: 0.62 {'total_loss': 2.4335802, 'cls_loss': 0.81078637, 'box_loss': 0.0074549178, 'model_loss': 1.183532, 'training_loss': 2.4335802, 'learning_rate': 0.0175}
I1030 21:21:35.996690 139885496419712 controller.py:201] Running 1564 steps of evaluation at train step: 6000
I1030 21:51:36.730204 139885496419712 coco_evaluator.py:128] There is no annotation_file in COCOEvaluator.
I1030 21:54:01.907360 139885496419712 controller.py:32] step: 6000 evaluation metric: {'total_loss': 2.3840065, 'cls_loss': 0.8075656, 'box_loss': 0.0073272684, 'model_loss': 1.1739291, 'validation_loss': 2.3840065, 'AP': 0.028316734, 'AP50': 0.0557604, 'AP75': 0.025625333, 'APs': 0.0040961187, 'APm': 0.021599174, 'APl': 0.042726006, 'ARmax1': 0.0904667, 'ARmax10': 0.15265372, 'ARmax100': 0.15850124, 'ARs': 0.021724917, 'ARm': 0.13395934, 'ARl': 0.23424989}
I1030 21:54:01.934641 139885496419712 controller.py:167] Train at step 6000 of 8000
I1030 21:54:01.935158 139885496419712 controller.py:334] Entering training loop at step 6000 to run 2000 steps
I1030 22:15:35.174637 139885496419712 controller.py:32] step: 8000 steps_per_second: 0.62 {'total_loss': 2.288062, 'cls_loss': 0.76706284, 'box_loss': 0.00698252, 'model_loss': 1.1161889, 'training_loss': 2.288062, 'learning_rate': 0.0175}
I1030 22:15:35.184676 139885496419712 controller.py:201] Running 1564 steps of evaluation at train step: 8000
I1030 22:45:35.542509 139885496419712 coco_evaluator.py:128] There is no annotation_file in COCOEvaluator.
I1030 22:47:53.906541 139885496419712 controller.py:32] step: 8000 evaluation metric: {'total_loss': 2.2552168, 'cls_loss': 0.76536787, 'box_loss': 0.0071057146, 'model_loss': 1.1206533, 'validation_loss': 2.2552168, 'AP': 0.04372218, 'AP50': 0.084751606, 'AP75': 0.040151175, 'APs': 0.00746491, 'APm': 0.037402872, 'APl': 0.06688058, 'ARmax1': 0.11082497, 'ARmax10': 0.1863409, 'ARmax100': 0.19500773, 'ARs': 0.03183885, 'ARm': 0.17150564, 'ARl': 0.28622577}
I1030 22:47:53.939067 139885496419712 controller.py:167] Train at step 8000 of 10000
I1030 22:47:53.939667 139885496419712 controller.py:334] Entering training loop at step 8000 to run 2000 steps
I1030 23:09:27.526988 139885496419712 controller.py:32] step: 10000 steps_per_second: 0.62 {'total_loss': 2.1607676, 'cls_loss': 0.7287942, 'box_loss': 0.006658137, 'model_loss': 1.0617015, 'training_loss': 2.1607676, 'learning_rate': 0.0175}
I1030 23:09:28.357703 139885496419712 controller.py:381] Saved checkpoints in training_dir/ckpt-10000
I1030 23:09:28.359310 139885496419712 controller.py:201] Running 1564 steps of evaluation at train step: 10000
I1030 23:39:30.371847 139885496419712 coco_evaluator.py:128] There is no annotation_file in COCOEvaluator.
step: 2000 steps_per_second: 1.46 {'total_loss': 2.952513, 'cls_loss': 1.0308509, 'box_loss': 0.009998905, 'model_loss': 1.5307966, 'training_loss': 2.952513, 'learning_rate': 0.0175}
creating index...
index created!
creating index...
index created!
Running per image evaluation...
Evaluate annotation type *bbox*
DONE (t=90.15s).
Accumulating evaluation results...
DONE (t=15.30s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.007
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.017
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.004
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.001
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.005
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.010
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.046
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.079
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.082
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.004
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.037
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.134
step: 2000 evaluation metric: {'total_loss': 2.7545106, 'cls_loss': 0.9337675, 'box_loss': 0.0088655995, 'model_loss': 1.3770468, 'validation_loss': 2.7545106, 'AP': 0.0066075507, 'AP50': 0.016931148, 'AP75': 0.0040874337, 'APs': 0.00055430405, 'APm': 0.004762264, 'APl': 0.009988218, 'ARmax1': 0.04598566, 'ARmax10': 0.079140015, 'ARmax100': 0.08211258, 'ARs': 0.003963818, 'ARm': 0.03661192, 'ARl': 0.13360435}
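The twelve rows of each pycocotools summary map one-to-one onto the AP/AR keys in the evaluation-metric dicts throughout this log. A hypothetical helper making that pairing explicit:

# Order of COCOeval.stats as printed by summarize() above.
COCO_METRIC_NAMES = [
    "AP", "AP50", "AP75", "APs", "APm", "APl",
    "ARmax1", "ARmax10", "ARmax100", "ARs", "ARm", "ARl",
]

def name_coco_stats(stats):
    """Pair each entry of a 12-element COCOeval.stats vector with its name."""
    return dict(zip(COCO_METRIC_NAMES, stats))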
step: 4000 steps_per_second: 0.60 {'total_loss': 2.6101823, 'cls_loss': 0.8718026, 'box_loss': 0.008097591, 'model_loss': 1.2766824, 'training_loss': 2.6101823, 'learning_rate': 0.0175}
creating index...
index created!
creating index...
index created!
Running per image evaluation...
Evaluate annotation type *bbox*
DONE (t=93.47s).
Accumulating evaluation results...
DONE (t=18.72s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.021
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.045
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.017
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.003
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.015
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.033
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.077
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.136
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.140
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.014
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.093
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.222
step: 4000 evaluation metric: {'total_loss': 2.5162745, 'cls_loss': 0.8355965, 'box_loss': 0.0078043677, 'model_loss': 1.225815, 'validation_loss': 2.5162745, 'AP': 0.021224046, 'AP50': 0.044965032, 'AP75': 0.017090354, 'APs': 0.0025279159, 'APm': 0.015290717, 'APl': 0.03299058, 'ARmax1': 0.07679166, 'ARmax10': 0.13622928, 'ARmax100': 0.14035718, 'ARs': 0.013522613, 'ARm': 0.09343928, 'ARl': 0.222368}
step: 6000 steps_per_second: 0.62 {'total_loss': 2.4335802, 'cls_loss': 0.81078637, 'box_loss': 0.0074549178, 'model_loss': 1.183532, 'training_loss': 2.4335802, 'learning_rate': 0.0175}
creating index...
index created!
creating index...
index created!
Running per image evaluation...
Evaluate annotation type *bbox*
DONE (t=103.30s).
Accumulating evaluation results...
DONE (t=15.17s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.028
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.056
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.026
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.004
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.022
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.043
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.090
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.153
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.159
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.022
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.134
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.234
step: 6000 evaluation metric: {'total_loss': 2.3840065, 'cls_loss': 0.8075656, 'box_loss': 0.0073272684, 'model_loss': 1.1739291, 'validation_loss': 2.3840065, 'AP': 0.028316734, 'AP50': 0.0557604, 'AP75': 0.025625333, 'APs': 0.0040961187, 'APm': 0.021599174, 'APl': 0.042726006, 'ARmax1': 0.0904667, 'ARmax10': 0.15265372, 'ARmax100': 0.15850124, 'ARs': 0.021724917, 'ARm': 0.13395934, 'ARl': 0.23424989}
step: 8000 steps_per_second: 0.62 {'total_loss': 2.288062, 'cls_loss': 0.76706284, 'box_loss': 0.00698252, 'model_loss': 1.1161889, 'training_loss': 2.288062, 'learning_rate': 0.0175}
creating index...
index created!
creating index...
index created!
Running per image evaluation...
Evaluate annotation type *bbox*
DONE (t=95.07s).
Accumulating evaluation results...
DONE (t=14.88s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.044
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.085
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.040
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.007
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.037
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.067
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.111
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.186
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.195
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.032
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.172
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.286
step: 8000 evaluation metric: {'total_loss': 2.2552168, 'cls_loss': 0.76536787, 'box_loss': 0.0071057146, 'model_loss': 1.1206533, 'validation_loss': 2.2552168, 'AP': 0.04372218, 'AP50': 0.084751606, 'AP75': 0.040151175, 'APs': 0.00746491, 'APm': 0.037402872, 'APl': 0.06688058, 'ARmax1': 0.11082497, 'ARmax10': 0.1863409, 'ARmax100': 0.19500773, 'ARs': 0.03183885, 'ARm': 0.17150564, 'ARl': 0.28622577}
step: 10000 steps_per_second: 0.62 {'total_loss': 2.1607676, 'cls_loss': 0.7287942, 'box_loss': 0.006658137, 'model_loss': 1.0617015, 'training_loss': 2.1607676, 'learning_rate': 0.0175}
creating index...
index created!
creating index...
index created!
Running per image evaluation...
Evaluate annotation type *bbox*
DONE (t=95.23s).
Accumulating evaluation results...
DONE (t=14.89s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.054
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.101
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.053
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.008
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.045
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.084
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.123
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.206
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.214
I1030 23:41:48.419408 139885496419712 controller.py:32] step: 10000 evaluation metric: {'total_loss': 2.1264522, 'cls_loss': 0.72794926, 'box_loss': 0.006682801, 'model_loss': 1.0620897, 'validation_loss': 2.1264522, 'AP': 0.054195903, 'AP50': 0.10130227, 'AP75': 0.052946668, 'APs': 0.008050608, 'APm': 0.04529771, 'APl': 0.08441855, 'ARmax1': 0.122621804, 'ARmax10': 0.20553222, 'ARmax100': 0.21406764, 'ARs': 0.0369188, 'ARm': 0.19431876, 'ARl': 0.32154137}
I1030 23:41:48.448698 139885496419712 controller.py:167] Train at step 10000 of 12000
I1030 23:41:48.449317 139885496419712 controller.py:334] Entering training loop at step 10000 to run 2000 steps
I1031 00:03:22.028709 139885496419712 controller.py:32] step: 12000 steps_per_second: 0.62 {'total_loss': 2.0662794, 'cls_loss': 0.7089585, 'box_loss': 0.0065142633, 'model_loss': 1.0346715, 'training_loss': 2.0662794, 'learning_rate': 0.0175}
I1031 00:03:22.039475 139885496419712 controller.py:201] Running 1564 steps of evaluation at train step: 12000
I1031 00:33:23.688980 139885496419712 coco_evaluator.py:128] There is no annotation_file in COCOEvaluator.
I1031 00:35:45.345192 139885496419712 controller.py:32] step: 12000 evaluation metric: {'total_loss': 2.0662136, 'cls_loss': 0.7376824, 'box_loss': 0.006583337, 'model_loss': 1.066849, 'validation_loss': 2.0662136, 'AP': 0.059586268, 'AP50': 0.11119602, 'AP75': 0.05633912, 'APs': 0.011301484, 'APm': 0.052767806, 'APl': 0.089686915, 'ARmax1': 0.12972695, 'ARmax10': 0.21874677, 'ARmax100': 0.22815393, 'ARs': 0.041254662, 'ARm': 0.19183718, 'ARl': 0.3477511}
I1031 00:35:45.385468 139885496419712 controller.py:167] Train at step 12000 of 14000
I1031 00:35:45.386043 139885496419712 controller.py:334] Entering training loop at step 12000 to run 2000 steps
I1031 00:57:18.957478 139885496419712 controller.py:32] step: 14000 steps_per_second: 0.62 {'total_loss': 1.969025, 'cls_loss': 0.6870775, 'box_loss': 0.00626731, 'model_loss': 1.0004433, 'training_loss': 1.969025, 'learning_rate': 0.0175}
I1031 00:57:18.968271 139885496419712 controller.py:201] Running 1564 steps of evaluation at train step: 14000
I1031 01:27:21.231963 139885496419712 coco_evaluator.py:128] There is no annotation_file in COCOEvaluator.
I1031 01:29:44.961457 139885496419712 controller.py:32] step: 14000 evaluation metric: {'total_loss': 1.9750702, 'cls_loss': 0.7104824, 'box_loss': 0.0065220655, 'model_loss': 1.0365856, 'validation_loss': 1.9750702, 'AP': 0.06698314, 'AP50': 0.12553436, 'AP75': 0.06383309, 'APs': 0.012342103, 'APm': 0.05755333, 'APl': 0.10402489, 'ARmax1': 0.13613912, 'ARmax10': 0.2210759, 'ARmax100': 0.23037915, 'ARs': 0.042005893, 'ARm': 0.20071448, 'ARl': 0.3492529}
I1031 01:29:44.993362 139885496419712 controller.py:167] Train at step 14000 of 16000
I1031 01:29:44.994000 139885496419712 controller.py:334] Entering training loop at step 14000 to run 2000 steps
I1031 01:51:18.396526 139885496419712 controller.py:32] step: 16000 steps_per_second: 0.62 {'total_loss': 1.8947617, 'cls_loss': 0.67529494, 'box_loss': 0.006190216, 'model_loss': 0.9848059, 'training_loss': 1.8947617, 'learning_rate': 0.0175}
I1031 01:51:18.406329 139885496419712 controller.py:201] Running 1564 steps of evaluation at train step: 16000
I1031 02:21:20.922477 139885496419712 coco_evaluator.py:128] There is no annotation_file in COCOEvaluator.
I1031 02:23:47.485918 139885496419712 controller.py:32] step: 16000 evaluation metric: {'total_loss': 1.9796606, 'cls_loss': 0.78193307, 'box_loss': 0.0063071055, 'model_loss': 1.0972879, 'validation_loss': 1.9796606, 'AP': 0.065123744, 'AP50': 0.11864428, 'AP75': 0.06447318, 'APs': 0.015601997, 'APm': 0.06371723, 'APl': 0.093020916, 'ARmax1': 0.12038191, 'ARmax10': 0.20010678, 'ARmax100': 0.2089182, 'ARs': 0.046627745, 'ARm': 0.20985255, 'ARl': 0.26905948}
I1031 02:23:47.515874 139885496419712 controller.py:167] Train at step 16000 of 18000
I1031 02:23:47.516430 139885496419712 controller.py:334] Entering training loop at step 16000 to run 2000 steps
I1031 02:45:21.368029 139885496419712 controller.py:32] step: 18000 steps_per_second: 0.62 {'total_loss': 1.8081924, 'cls_loss': 0.65352744, 'box_loss': 0.0059808283, 'model_loss': 0.95256877, 'training_loss': 1.8081924, 'learning_rate': 0.0175}
I1031 02:45:22.204224 139885496419712 controller.py:381] Saved checkpoints in training_dir/ckpt-18000
I1031 02:45:22.205612 139885496419712 controller.py:201] Running 1564 steps of evaluation at train step: 18000
I1031 03:15:22.550125 139885496419712 coco_evaluator.py:128] There is no annotation_file in COCOEvaluator.
I1031 03:17:51.459395 139885496419712 controller.py:32] step: 18000 evaluation metric: {'total_loss': 1.8092948, 'cls_loss': 0.6702564, 'box_loss': 0.006191191, 'model_loss': 0.97981566, 'validation_loss': 1.8092948, 'AP': 0.084427126, 'AP50': 0.15396488, 'AP75': 0.08334383, 'APs': 0.020888355, 'APm': 0.072113805, 'APl': 0.1291615, 'ARmax1': 0.15485245, 'ARmax10': 0.25424233, 'ARmax100': 0.26476988, 'ARs': 0.05902366, 'ARm': 0.24243225, 'ARl': 0.39029872}
I1031 03:17:51.488985 139885496419712 controller.py:167] Train at step 18000 of 20000
I1031 03:17:51.489529 139885496419712 controller.py:334] Entering training loop at step 18000 to run 2000 steps
I1031 03:39:24.790594 139885496419712 controller.py:32] step: 20000 steps_per_second: 0.62 {'total_loss': 1.7407566, 'cls_loss': 0.6418706, 'box_loss': 0.0058854446, 'model_loss': 0.93614256, 'training_loss': 1.7407566, 'learning_rate': 0.0175}
I1031 03:39:24.801592 139885496419712 controller.py:201] Running 1564 steps of evaluation at train step: 20000
I1031 04:09:25.722967 139885496419712 coco_evaluator.py:128] There is no annotation_file in COCOEvaluator.
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.037
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.194
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.322
step: 10000 evaluation metric: {'total_loss': 2.1264522, 'cls_loss': 0.72794926, 'box_loss': 0.006682801, 'model_loss': 1.0620897, 'validation_loss': 2.1264522, 'AP': 0.054195903, 'AP50': 0.10130227, 'AP75': 0.052946668, 'APs': 0.008050608, 'APm': 0.04529771, 'APl': 0.08441855, 'ARmax1': 0.122621804, 'ARmax10': 0.20553222, 'ARmax100': 0.21406764, 'ARs': 0.0369188, 'ARm': 0.19431876, 'ARl': 0.32154137}
step: 12000 steps_per_second: 0.62 {'total_loss': 2.0662794, 'cls_loss': 0.7089585, 'box_loss': 0.0065142633, 'model_loss': 1.0346715, 'training_loss': 2.0662794, 'learning_rate': 0.0175}
creating index...
index created!
creating index...
index created!
Running per image evaluation...
Evaluate annotation type *bbox*
DONE (t=97.03s).
Accumulating evaluation results...
DONE (t=18.19s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.060
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.111
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.056
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.011
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.053
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.090
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.130
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.219
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.228
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.041
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.192
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.348
step: 12000 evaluation metric: {'total_loss': 2.0662136, 'cls_loss': 0.7376824, 'box_loss': 0.006583337, 'model_loss': 1.066849, 'validation_loss': 2.0662136, 'AP': 0.059586268, 'AP50': 0.11119602, 'AP75': 0.05633912, 'APs': 0.011301484, 'APm': 0.052767806, 'APl': 0.089686915, 'ARmax1': 0.12972695, 'ARmax10': 0.21874677, 'ARmax100': 0.22815393, 'ARs': 0.041254662, 'ARm': 0.19183718, 'ARl': 0.3477511}
step: 14000 steps_per_second: 0.62 {'total_loss': 1.969025, 'cls_loss': 0.6870775, 'box_loss': 0.00626731, 'model_loss': 1.0004433, 'training_loss': 1.969025, 'learning_rate': 0.0175}
creating index...
index created!
creating index...
index created!
Running per image evaluation...
Evaluate annotation type *bbox*
DONE (t=103.28s).
Accumulating evaluation results...
DONE (t=14.10s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.067
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.126
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.064
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.012
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.058
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.104
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.136
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.221
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.230
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.042
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.201
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.349
step: 14000 evaluation metric: {'total_loss': 1.9750702, 'cls_loss': 0.7104824, 'box_loss': 0.0065220655, 'model_loss': 1.0365856, 'validation_loss': 1.9750702, 'AP': 0.06698314, 'AP50': 0.12553436, 'AP75': 0.06383309, 'APs': 0.012342103, 'APm': 0.05755333, 'APl': 0.10402489, 'ARmax1': 0.13613912, 'ARmax10': 0.2210759, 'ARmax100': 0.23037915, 'ARs': 0.042005893, 'ARm': 0.20071448, 'ARl': 0.3492529}
step: 16000 steps_per_second: 0.62 {'total_loss': 1.8947617, 'cls_loss': 0.67529494, 'box_loss': 0.006190216, 'model_loss': 0.9848059, 'training_loss': 1.8947617, 'learning_rate': 0.0175}
creating index...
index created!
creating index...
index created!
Running per image evaluation...
Evaluate annotation type *bbox*
DONE (t=105.23s).
Accumulating evaluation results...
DONE (t=13.30s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.065
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.119
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.064
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.016
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.064
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.093
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.120
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.200
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.209
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.047
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.210
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.269
step: 16000 evaluation metric: {'total_loss': 1.9796606, 'cls_loss': 0.78193307, 'box_loss': 0.0063071055, 'model_loss': 1.0972879, 'validation_loss': 1.9796606, 'AP': 0.065123744, 'AP50': 0.11864428, 'AP75': 0.06447318, 'APs': 0.015601997, 'APm': 0.06371723, 'APl': 0.093020916, 'ARmax1': 0.12038191, 'ARmax10': 0.20010678, 'ARmax100': 0.2089182, 'ARs': 0.046627745, 'ARm': 0.20985255, 'ARl': 0.26905948}
step: 18000 steps_per_second: 0.62 {'total_loss': 1.8081924, 'cls_loss': 0.65352744, 'box_loss': 0.0059808283, 'model_loss': 0.95256877, 'training_loss': 1.8081924, 'learning_rate': 0.0175}
creating index...
index created!
creating index...
index created!
Running per image evaluation...
Evaluate annotation type *bbox*
DONE (t=103.97s).
Accumulating evaluation results...
DONE (t=15.19s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.084
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.154
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.083
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.021
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.072
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.129
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.155
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.254
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.265
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.059
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.242
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.390
step: 18000 evaluation metric: {'total_loss': 1.8092948, 'cls_loss': 0.6702564, 'box_loss': 0.006191191, 'model_loss': 0.97981566, 'validation_loss': 1.8092948, 'AP': 0.084427126, 'AP50': 0.15396488, 'AP75': 0.08334383, 'APs': 0.020888355, 'APm': 0.072113805, 'APl': 0.1291615, 'ARmax1': 0.15485245, 'ARmax10': 0.25424233, 'ARmax100': 0.26476988, 'ARs': 0.05902366, 'ARm': 0.24243225, 'ARl': 0.39029872}
step: 20000 steps_per_second: 0.62 {'total_loss': 1.7407566, 'cls_loss': 0.6418706, 'box_loss': 0.0058854446, 'model_loss': 0.93614256, 'training_loss': 1.7407566, 'learning_rate': 0.0175}
creating index...
index created!
creating index...
index created!
Running per image evaluation...
Evaluate annotation type *bbox*
DONE (t=95.65s).
Accumulating evaluation results...
DONE (t=13.85s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.093
I1031 04:11:42.257416 139885496419712 controller.py:32] step: 20000 evaluation metric: {'total_loss': 1.7365601, 'cls_loss': 0.6543167, 'box_loss': 0.0060396246, 'model_loss': 0.9562982, 'validation_loss': 1.7365601, 'AP': 0.09287657, 'AP50': 0.16696998, 'AP75': 0.091831, 'APs': 0.016764913, 'APm': 0.08103716, 'APl': 0.14146243, 'ARmax1': 0.15737882, 'ARmax10': 0.25748417, 'ARmax100': 0.27028137, 'ARs': 0.050834853, 'ARm': 0.250391, 'ARl': 0.4025708}
I1031 04:11:42.288806 139885496419712 controller.py:167] Train at step 20000 of 22000
I1031 04:11:42.289491 139885496419712 controller.py:334] Entering training loop at step 20000 to run 2000 steps
I1031 04:33:16.587417 139885496419712 controller.py:32] step: 22000 steps_per_second: 0.62 {'total_loss': 1.6866505, 'cls_loss': 0.6370228, 'box_loss': 0.00584322, 'model_loss': 0.9291848, 'training_loss': 1.6866505, 'learning_rate': 0.0175}
I1031 04:33:16.598840 139885496419712 controller.py:201] Running 1564 steps of evaluation at train step: 22000
I1031 05:03:17.182420 139885496419712 coco_evaluator.py:128] There is no annotation_file in COCOEvaluator.
I1031 05:05:34.372040 139885496419712 controller.py:32] step: 22000 evaluation metric: {'total_loss': 1.689145, 'cls_loss': 0.6559819, 'box_loss': 0.0059656464, 'model_loss': 0.95426446, 'validation_loss': 1.689145, 'AP': 0.097518176, 'AP50': 0.17676663, 'AP75': 0.09696455, 'APs': 0.024464171, 'APm': 0.08607653, 'APl': 0.14655727, 'ARmax1': 0.16281688, 'ARmax10': 0.2641258, 'ARmax100': 0.27565268, 'ARs': 0.070531555, 'ARm': 0.26941356, 'ARl': 0.3862451}
I1031 05:05:34.408134 139885496419712 controller.py:167] Train at step 22000 of 24000
I1031 05:05:34.408637 139885496419712 controller.py:334] Entering training loop at step 22000 to run 2000 steps
I1031 05:27:08.061284 139885496419712 controller.py:32] step: 24000 steps_per_second: 0.62 {'total_loss': 1.617964, 'cls_loss': 0.6204008, 'box_loss': 0.00568443, 'model_loss': 0.90462315, 'training_loss': 1.617964, 'learning_rate': 0.0175}
I1031 05:27:08.071365 139885496419712 controller.py:201] Running 1564 steps of evaluation at train step: 24000
I1031 05:57:07.937834 139885496419712 coco_evaluator.py:128] There is no annotation_file in COCOEvaluator.
I1031 05:59:30.306594 139885496419712 controller.py:32] step: 24000 evaluation metric: {'total_loss': 1.6017793, 'cls_loss': 0.6207918, 'box_loss': 0.005774223, 'model_loss': 0.9095029, 'validation_loss': 1.6017793, 'AP': 0.1170256, 'AP50': 0.20438606, 'AP75': 0.11793307, 'APs': 0.024898289, 'APm': 0.09308181, 'APl': 0.1858881, 'ARmax1': 0.17547216, 'ARmax10': 0.286284, 'ARmax100': 0.30100176, 'ARs': 0.072808474, 'ARm': 0.28913948, 'ARl': 0.43568316}
I1031 05:59:30.340992 139885496419712 controller.py:167] Train at step 24000 of 26000
I1031 05:59:30.341486 139885496419712 controller.py:334] Entering training loop at step 24000 to run 2000 steps
I1031 06:21:05.103114 139885496419712 controller.py:32] step: 26000 steps_per_second: 0.62 {'total_loss': 1.5680726, 'cls_loss': 0.6153645, 'box_loss': 0.0056095384, 'model_loss': 0.8958406, 'training_loss': 1.5680726, 'learning_rate': 0.0175}
I1031 06:21:05.891192 139885496419712 controller.py:381] Saved checkpoints in training_dir/ckpt-26000
I1031 06:21:05.892592 139885496419712 controller.py:201] Running 1564 steps of evaluation at train step: 26000
I1031 06:51:07.122737 139885496419712 coco_evaluator.py:128] There is no annotation_file in COCOEvaluator.
I1031 06:53:27.149734 139885496419712 controller.py:32] step: 26000 evaluation metric: {'total_loss': 1.5675296, 'cls_loss': 0.62697786, 'box_loss': 0.0057582282, 'model_loss': 0.91488904, 'validation_loss': 1.5675296, 'AP': 0.1090435, 'AP50': 0.19172487, 'AP75': 0.10934281, 'APs': 0.019161979, 'APm': 0.093128696, 'APl': 0.16513485, 'ARmax1': 0.1705256, 'ARmax10': 0.27630976, 'ARmax100': 0.2891266, 'ARs': 0.06790081, 'ARm': 0.2806543, 'ARl': 0.40896055}
I1031 06:53:27.187281 139885496419712 controller.py:167] Train at step 26000 of 28000
I1031 06:53:27.187832 139885496419712 controller.py:334] Entering training loop at step 26000 to run 2000 steps
I1031 07:15:01.245083 139885496419712 controller.py:32] step: 28000 steps_per_second: 0.62 {'total_loss': 1.5182695, 'cls_loss': 0.60736024, 'box_loss': 0.005538965, 'model_loss': 0.8843092, 'training_loss': 1.5182695, 'learning_rate': 0.0175}
I1031 07:15:01.255714 139885496419712 controller.py:201] Running 1564 steps of evaluation at train step: 28000
I1031 07:45:01.585833 139885496419712 coco_evaluator.py:128] There is no annotation_file in COCOEvaluator.
I1031 07:47:28.053360 139885496419712 controller.py:32] step: 28000 evaluation metric: {'total_loss': 1.5189503, 'cls_loss': 0.62144244, 'box_loss': 0.0056359097, 'model_loss': 0.9032378, 'validation_loss': 1.5189503, 'AP': 0.12010572, 'AP50': 0.20608844, 'AP75': 0.1223577, 'APs': 0.028878134, 'APm': 0.095463865, 'APl': 0.18948387, 'ARmax1': 0.18265218, 'ARmax10': 0.29688853, 'ARmax100': 0.30938557, 'ARs': 0.076697454, 'ARm': 0.2944731, 'ARl': 0.45407113}
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.167
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.092
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.017
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.081
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.141
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.157
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.257
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.270
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.051
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.250
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.403
step: 20000 evaluation metric: {'total_loss': 1.7365601, 'cls_loss': 0.6543167, 'box_loss': 0.0060396246, 'model_loss': 0.9562982, 'validation_loss': 1.7365601, 'AP': 0.09287657, 'AP50': 0.16696998, 'AP75': 0.091831, 'APs': 0.016764913, 'APm': 0.08103716, 'APl': 0.14146243, 'ARmax1': 0.15737882, 'ARmax10': 0.25748417, 'ARmax100': 0.27028137, 'ARs': 0.050834853, 'ARm': 0.250391, 'ARl': 0.4025708}
step: 22000 steps_per_second: 0.62 {'total_loss': 1.6866505, 'cls_loss': 0.6370228, 'box_loss': 0.00584322, 'model_loss': 0.9291848, 'training_loss': 1.6866505, 'learning_rate': 0.0175}
creating index...
index created!
creating index...
index created!
Running per image evaluation...
Evaluate annotation type *bbox*
DONE (t=95.87s).
Accumulating evaluation results...
DONE (t=13.83s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.098
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.177
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.097
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.024
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.086
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.147
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.163
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.264
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.276
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.071
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.269
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.386
step: 22000 evaluation metric: {'total_loss': 1.689145, 'cls_loss': 0.6559819, 'box_loss': 0.0059656464, 'model_loss': 0.95426446, 'validation_loss': 1.689145, 'AP': 0.097518176, 'AP50': 0.17676663, 'AP75': 0.09696455, 'APs': 0.024464171, 'APm': 0.08607653, 'APl': 0.14655727, 'ARmax1': 0.16281688, 'ARmax10': 0.2641258, 'ARmax100': 0.27565268, 'ARs': 0.070531555, 'ARm': 0.26941356, 'ARl': 0.3862451}
step: 24000 steps_per_second: 0.62 {'total_loss': 1.617964, 'cls_loss': 0.6204008, 'box_loss': 0.00568443, 'model_loss': 0.90462315, 'training_loss': 1.617964, 'learning_rate': 0.0175}
creating index...
index created!
creating index...
index created!
Running per image evaluation...
Evaluate annotation type *bbox*
DONE (t=100.82s).
Accumulating evaluation results...
DONE (t=13.87s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.117
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.204
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.118
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.025
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.093
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.186
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.175
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.286
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.301
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.073
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.289
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.436
step: 24000 evaluation metric: {'total_loss': 1.6017793, 'cls_loss': 0.6207918, 'box_loss': 0.005774223, 'model_loss': 0.9095029, 'validation_loss': 1.6017793, 'AP': 0.1170256, 'AP50': 0.20438606, 'AP75': 0.11793307, 'APs': 0.024898289, 'APm': 0.09308181, 'APl': 0.1858881, 'ARmax1': 0.17547216, 'ARmax10': 0.286284, 'ARmax100': 0.30100176, 'ARs': 0.072808474, 'ARm': 0.28913948, 'ARl': 0.43568316}
step: 26000 steps_per_second: 0.62 {'total_loss': 1.5680726, 'cls_loss': 0.6153645, 'box_loss': 0.0056095384, 'model_loss': 0.8958406, 'training_loss': 1.5680726, 'learning_rate': 0.0175}
creating index...
index created!
creating index...
index created!
Running per image evaluation...
Evaluate annotation type *bbox*
DONE (t=99.39s).
Accumulating evaluation results...
DONE (t=14.22s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.109
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.192
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.109
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.019
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.093
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.165
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.171
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.276
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.289
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.068
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.281
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.409
step: 26000 evaluation metric: {'total_loss': 1.5675296, 'cls_loss': 0.62697786, 'box_loss': 0.0057582282, 'model_loss': 0.91488904, 'validation_loss': 1.5675296, 'AP': 0.1090435, 'AP50': 0.19172487, 'AP75': 0.10934281, 'APs': 0.019161979, 'APm': 0.093128696, 'APl': 0.16513485, 'ARmax1': 0.1705256, 'ARmax10': 0.27630976, 'ARmax100': 0.2891266, 'ARs': 0.06790081, 'ARm': 0.2806543, 'ARl': 0.40896055}
step: 28000 steps_per_second: 0.62 {'total_loss': 1.5182695, 'cls_loss': 0.60736024, 'box_loss': 0.005538965, 'model_loss': 0.8843092, 'training_loss': 1.5182695, 'learning_rate': 0.0175}
creating index...
index created!
creating index...
index created!
Running per image evaluation...
Evaluate annotation type *bbox*
DONE (t=102.35s).
Accumulating evaluation results...
DONE (t=14.55s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.120
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.206
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.122
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.029
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.095
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.189
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.183
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.297
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.309
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.077
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.294
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.454
step: 28000 evaluation metric: {'total_loss': 1.5189503, 'cls_loss': 0.62144244, 'box_loss': 0.0056359097, 'model_loss': 0.9032378, 'validation_loss': 1.5189503, 'AP': 0.12010572, 'AP50': 0.20608844, 'AP75': 0.1223577, 'APs': 0.028878134, 'APm': 0.095463865, 'APl': 0.18948387, 'ARmax1': 0.18265218, 'ARmax10': 0.29688853, 'ARmax100': 0.30938557, 'ARs': 0.076697454, 'ARm': 0.2944731, 'ARl': 0.45407113}
I1031 07:47:28.088339 139885496419712 controller.py:167] Train at step 28000 of 30000
I1031 07:47:28.088917 139885496419712 controller.py:334] Entering training loop at step 28000 to run 2000 steps
I1031 08:09:02.155096 139885496419712 controller.py:32] step: 30000 steps_per_second: 0.62 {'total_loss': 1.4702015, 'cls_loss': 0.5977997, 'box_loss': 0.005480808, 'model_loss': 0.87184054, 'training_loss': 1.4702015, 'learning_rate': 0.0175}
I1031 08:09:02.165273 139885496419712 controller.py:201] Running 1564 steps of evaluation at train step: 30000
I1031 08:39:00.798716 139885496419712 coco_evaluator.py:128] There is no annotation_file in COCOEvaluator.
I1031 08:41:16.337606 139885496419712 controller.py:32] step: 30000 evaluation metric: {'total_loss': 1.4634776, 'cls_loss': 0.60642874, 'box_loss': 0.005513028, 'model_loss': 0.88208026, 'validation_loss': 1.4634776, 'AP': 0.12509336, 'AP50': 0.21519625, 'AP75': 0.12908259, 'APs': 0.024677623, 'APm': 0.10667812, 'APl': 0.19518888, 'ARmax1': 0.1834508, 'ARmax10': 0.29639313, 'ARmax100': 0.3098349, 'ARs': 0.074831896, 'ARm': 0.29660422, 'ARl': 0.4517507}
I1031 08:41:16.367896 139885496419712 controller.py:167] Train at step 30000 of 32000
I1031 08:41:16.368516 139885496419712 controller.py:334] Entering training loop at step 30000 to run 2000 steps
I1031 09:02:50.250838 139885496419712 controller.py:32] step: 32000 steps_per_second: 0.62 {'total_loss': 1.4236084, 'cls_loss': 0.5891118, 'box_loss': 0.005384081, 'model_loss': 0.8583154, 'training_loss': 1.4236084, 'learning_rate': 0.0175}
I1031 09:02:50.261479 139885496419712 controller.py:201] Running 1564 steps of evaluation at train step: 32000
I1031 09:32:52.535594 139885496419712 coco_evaluator.py:128] There is no annotation_file in COCOEvaluator.
I1031 09:35:15.225395 139885496419712 controller.py:32] step: 32000 evaluation metric: {'total_loss': 1.4254229, 'cls_loss': 0.6034627, 'box_loss': 0.005448834, 'model_loss': 0.8759045, 'validation_loss': 1.4254229, 'AP': 0.12228533, 'AP50': 0.21047877, 'AP75': 0.12515314, 'APs': 0.021129027, 'APm': 0.106184624, 'APl': 0.18722458, 'ARmax1': 0.18199767, 'ARmax10': 0.29837707, 'ARmax100': 0.31369108, 'ARs': 0.0720723, 'ARm': 0.29641658, 'ARl': 0.46136618}
I1031 09:35:15.259743 139885496419712 controller.py:167] Train at step 32000 of 34000
I1031 09:35:15.260261 139885496419712 controller.py:334] Entering training loop at step 32000 to run 2000 steps
I1031 09:56:49.535590 139885496419712 controller.py:32] step: 34000 steps_per_second: 0.62 {'total_loss': 1.3879714, 'cls_loss': 0.5854915, 'box_loss': 0.0053599207, 'model_loss': 0.8534866, 'training_loss': 1.3879714, 'learning_rate': 0.0175}
I1031 09:56:50.356324 139885496419712 controller.py:381] Saved checkpoints in training_dir/ckpt-34000
I1031 09:56:50.357955 139885496419712 controller.py:201] Running 1564 steps of evaluation at train step: 34000
I1031 10:26:50.954882 139885496419712 coco_evaluator.py:128] There is no annotation_file in COCOEvaluator.
I1031 10:29:03.090598 139885496419712 controller.py:32] step: 34000 evaluation metric: {'total_loss': 1.4026945, 'cls_loss': 0.6071424, 'box_loss': 0.0055147246, 'model_loss': 0.8828786, 'validation_loss': 1.4026945, 'AP': 0.12370313, 'AP50': 0.21386495, 'AP75': 0.1273946, 'APs': 0.023783108, 'APm': 0.103701055, 'APl': 0.19189158, 'ARmax1': 0.18421498, 'ARmax10': 0.2939707, 'ARmax100': 0.30837682, 'ARs': 0.075453445, 'ARm': 0.28140718, 'ARl': 0.4575001}
I1031 10:29:03.115907 139885496419712 controller.py:167] Train at step 34000 of 36000
I1031 10:29:03.116474 139885496419712 controller.py:334] Entering training loop at step 34000 to run 2000 steps
I1031 10:50:37.206810 139885496419712 controller.py:32] step: 36000 steps_per_second: 0.62 {'total_loss': 1.3549321, 'cls_loss': 0.58248144, 'box_loss': 0.005332086, 'model_loss': 0.8490854, 'training_loss': 1.3549321, 'learning_rate': 0.0175}
I1031 10:50:37.217392 139885496419712 controller.py:201] Running 1564 steps of evaluation at train step: 36000
I1031 11:20:39.467981 139885496419712 coco_evaluator.py:128] There is no annotation_file in COCOEvaluator.
I1031 11:22:56.289299 139885496419712 controller.py:32] step: 36000 evaluation metric: {'total_loss': 1.3784864, 'cls_loss': 0.60839164, 'box_loss': 0.0055575483, 'model_loss': 0.8862693, 'validation_loss': 1.3784864, 'AP': 0.12903413, 'AP50': 0.22434664, 'AP75': 0.13116604, 'APs': 0.025630709, 'APm': 0.111586586, 'APl': 0.20179033, 'ARmax1': 0.18421678, 'ARmax10': 0.29527283, 'ARmax100': 0.3096738, 'ARs': 0.07373256, 'ARm': 0.2999831, 'ARl': 0.44662184}
I1031 11:22:56.318710 139885496419712 controller.py:167] Train at step 36000 of 38000
I1031 11:22:56.319190 139885496419712 controller.py:334] Entering training loop at step 36000 to run 2000 steps
I1031 11:44:30.339666 139885496419712 controller.py:32] step: 38000 steps_per_second: 0.62 {'total_loss': 1.3185784, 'cls_loss': 0.57590026, 'box_loss': 0.0052685854, 'model_loss': 0.8393302, 'training_loss': 1.3185784, 'learning_rate': 0.0175}
I1031 11:44:30.350597 139885496419712 controller.py:201] Running 1564 steps of evaluation at train step: 38000
I1031 12:14:31.522972 139885496419712 coco_evaluator.py:128] There is no annotation_file in COCOEvaluator.
step: 30000 steps_per_second: 0.62 {'total_loss': 1.4702015, 'cls_loss': 0.5977997, 'box_loss': 0.005480808, 'model_loss': 0.87184054, 'training_loss': 1.4702015, 'learning_rate': 0.0175}
creating index...
index created!
creating index...
index created!
Running per image evaluation...
Evaluate annotation type *bbox*
DONE (t=94.71s).
Accumulating evaluation results...
DONE (t=13.82s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.125
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.215
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.129
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.025
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.107
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.195
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.183
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.296
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.310
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.075
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.297
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.452
step: 30000 evaluation metric: {'total_loss': 1.4634776, 'cls_loss': 0.60642874, 'box_loss': 0.005513028, 'model_loss': 0.88208026, 'validation_loss': 1.4634776, 'AP': 0.12509336, 'AP50': 0.21519625, 'AP75': 0.12908259, 'APs': 0.024677623, 'APm': 0.10667812, 'APl': 0.19518888, 'ARmax1': 0.1834508, 'ARmax10': 0.29639313, 'ARmax100': 0.3098349, 'ARs': 0.074831896, 'ARm': 0.29660422, 'ARl': 0.4517507}
step: 32000 steps_per_second: 0.62 {'total_loss': 1.4236084, 'cls_loss': 0.5891118, 'box_loss': 0.005384081, 'model_loss': 0.8583154, 'training_loss': 1.4236084, 'learning_rate': 0.0175}
creating index...
index created!
creating index...
index created!
Running per image evaluation...
Evaluate annotation type *bbox*
DONE (t=99.15s).
Accumulating evaluation results...
DONE (t=17.32s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.122
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.210
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.125
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.021
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.106
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.187
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.182
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.298
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.314
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.072
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.296
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.461
step: 32000 evaluation metric: {'total_loss': 1.4254229, 'cls_loss': 0.6034627, 'box_loss': 0.005448834, 'model_loss': 0.8759045, 'validation_loss': 1.4254229, 'AP': 0.12228533, 'AP50': 0.21047877, 'AP75': 0.12515314, 'APs': 0.021129027, 'APm': 0.106184624, 'APl': 0.18722458, 'ARmax1': 0.18199767, 'ARmax10': 0.29837707, 'ARmax100': 0.31369108, 'ARs': 0.0720723, 'ARm': 0.29641658, 'ARl': 0.46136618}
step: 34000 steps_per_second: 0.62 {'total_loss': 1.3879714, 'cls_loss': 0.5854915, 'box_loss': 0.0053599207, 'model_loss': 0.8534866, 'training_loss': 1.3879714, 'learning_rate': 0.0175}
creating index...
index created!
creating index...
index created!
Running per image evaluation...
Evaluate annotation type *bbox*
DONE (t=91.78s).
Accumulating evaluation results...
DONE (t=13.64s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.124
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.214
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.127
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.024
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.104
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.192
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.184
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.294
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.308
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.075
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.281
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.458
step: 34000 evaluation metric: {'total_loss': 1.4026945, 'cls_loss': 0.6071424, 'box_loss': 0.0055147246, 'model_loss': 0.8828786, 'validation_loss': 1.4026945, 'AP': 0.12370313, 'AP50': 0.21386495, 'AP75': 0.1273946, 'APs': 0.023783108, 'APm': 0.103701055, 'APl': 0.19189158, 'ARmax1': 0.18421498, 'ARmax10': 0.2939707, 'ARmax100': 0.30837682, 'ARs': 0.075453445, 'ARm': 0.28140718, 'ARl': 0.4575001}
step: 36000 steps_per_second: 0.62 {'total_loss': 1.3549321, 'cls_loss': 0.58248144, 'box_loss': 0.005332086, 'model_loss': 0.8490854, 'training_loss': 1.3549321, 'learning_rate': 0.0175}
creating index...
index created!
creating index...
index created!
Running per image evaluation...
Evaluate annotation type *bbox*
DONE (t=96.87s).
Accumulating evaluation results...
DONE (t=13.45s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.129
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.224
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.131
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.026
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.112
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.202
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.184
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.295
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.310
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.074
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.300
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.447
step: 36000 evaluation metric: {'total_loss': 1.3784864, 'cls_loss': 0.60839164, 'box_loss': 0.0055575483, 'model_loss': 0.8862693, 'validation_loss': 1.3784864, 'AP': 0.12903413, 'AP50': 0.22434664, 'AP75': 0.13116604, 'APs': 0.025630709, 'APm': 0.111586586, 'APl': 0.20179033, 'ARmax1': 0.18421678, 'ARmax10': 0.29527283, 'ARmax100': 0.3096738, 'ARs': 0.07373256, 'ARm': 0.2999831, 'ARl': 0.44662184}
step: 38000 steps_per_second: 0.62 {'total_loss': 1.3185784, 'cls_loss': 0.57590026, 'box_loss': 0.0052685854, 'model_loss': 0.8393302, 'training_loss': 1.3185784, 'learning_rate': 0.0175}
creating index...
index created!
creating index...
index created!
Running per image evaluation...
Evaluate annotation type *bbox*
DONE (t=88.68s).
Accumulating evaluation results...
DONE (t=13.35s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.129
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.225
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.132
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.028
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.114
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.196
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.184
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.298
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.313
I1031 12:16:40.926157 139885496419712 controller.py:32] step: 38000 evaluation metric: {'total_loss': 1.3514221, 'cls_loss': 0.61072814, 'box_loss': 0.00548145, 'model_loss': 0.8848008, 'validation_loss': 1.3514221, 'AP': 0.12941317, 'AP50': 0.22522616, 'AP75': 0.13184388, 'APs': 0.028182765, 'APm': 0.1135972, 'APl': 0.19637726, 'ARmax1': 0.18401565, 'ARmax10': 0.29843047, 'ARmax100': 0.31292313, 'ARs': 0.08129782, 'ARm': 0.3145205, 'ARl': 0.44674942}
I1031 12:16:40.960808 139885496419712 controller.py:167] Train at step 38000 of 40000
I1031 12:16:40.961389 139885496419712 controller.py:334] Entering training loop at step 38000 to run 2000 steps
I1031 12:38:15.597293 139885496419712 controller.py:32] step: 40000 steps_per_second: 0.62 {'total_loss': 1.2835882, 'cls_loss': 0.56903994, 'box_loss': 0.005200124, 'model_loss': 0.82904565, 'training_loss': 1.2835882, 'learning_rate': 0.0175}
I1031 12:38:15.607194 139885496419712 controller.py:201] Running 1564 steps of evaluation at train step: 40000
I1031 13:08:15.942293 139885496419712 coco_evaluator.py:128] There is no annotation_file in COCOEvaluator.
I1031 13:10:33.274583 139885496419712 controller.py:32] step: 40000 evaluation metric: {'total_loss': 1.2893002, 'cls_loss': 0.5780909, 'box_loss': 0.0053695966, 'model_loss': 0.8465711, 'validation_loss': 1.2893002, 'AP': 0.1392194, 'AP50': 0.24242955, 'AP75': 0.14185813, 'APs': 0.03170243, 'APm': 0.11697013, 'APl': 0.21997045, 'ARmax1': 0.19765268, 'ARmax10': 0.3194477, 'ARmax100': 0.3355437, 'ARs': 0.09075803, 'ARm': 0.3163557, 'ARl': 0.4858842}
I1031 13:10:33.310683 139885496419712 controller.py:167] Train at step 40000 of 42000
I1031 13:10:33.311316 139885496419712 controller.py:334] Entering training loop at step 40000 to run 2000 steps
I1031 13:32:07.838726 139885496419712 controller.py:32] step: 42000 steps_per_second: 0.62 {'total_loss': 1.2697237, 'cls_loss': 0.57452255, 'box_loss': 0.0052705845, 'model_loss': 0.8380518, 'training_loss': 1.2697237, 'learning_rate': 0.0175}
I1031 13:32:08.668227 139885496419712 controller.py:381] Saved checkpoints in training_dir/ckpt-42000
I1031 13:32:08.669720 139885496419712 controller.py:201] Running 1564 steps of evaluation at train step: 42000
I1031 14:02:11.143864 139885496419712 coco_evaluator.py:128] There is no annotation_file in COCOEvaluator.
I1031 14:04:29.832409 139885496419712 controller.py:32] step: 42000 evaluation metric: {'total_loss': 1.5447754, 'cls_loss': 0.785464, 'box_loss': 0.006719882, 'model_loss': 1.121458, 'validation_loss': 1.5447754, 'AP': 0.062637836, 'AP50': 0.11126457, 'AP75': 0.06359231, 'APs': 0.011471858, 'APm': 0.05856425, 'APl': 0.09969066, 'ARmax1': 0.12162978, 'ARmax10': 0.19260113, 'ARmax100': 0.19863887, 'ARs': 0.027858548, 'ARm': 0.17577986, 'ARl': 0.3217958}
I1031 14:04:29.866588 139885496419712 controller.py:167] Train at step 42000 of 44000
I1031 14:04:29.867208 139885496419712 controller.py:334] Entering training loop at step 42000 to run 2000 steps
I1031 14:26:03.703194 139885496419712 controller.py:32] step: 44000 steps_per_second: 0.62 {'total_loss': 1.2505301, 'cls_loss': 0.5742789, 'box_loss': 0.0052680196, 'model_loss': 0.83768034, 'training_loss': 1.2505301, 'learning_rate': 0.0175}
I1031 14:26:03.713587 139885496419712 controller.py:201] Running 1564 steps of evaluation at train step: 44000
I1031 14:56:02.766963 139885496419712 coco_evaluator.py:128] There is no annotation_file in COCOEvaluator.
I1031 14:58:16.430249 139885496419712 controller.py:32] step: 44000 evaluation metric: {'total_loss': 1.3667822, 'cls_loss': 0.6602397, 'box_loss': 0.006074844, 'model_loss': 0.9639824, 'validation_loss': 1.3667822, 'AP': 0.09594054, 'AP50': 0.17444487, 'AP75': 0.094005845, 'APs': 0.021597486, 'APm': 0.093030944, 'APl': 0.14369863, 'ARmax1': 0.15980989, 'ARmax10': 0.26280856, 'ARmax100': 0.27431372, 'ARs': 0.059566103, 'ARm': 0.2790743, 'ARl': 0.38445896}
I1031 14:58:16.463191 139885496419712 controller.py:167] Train at step 44000 of 46000
I1031 14:58:16.463725 139885496419712 controller.py:334] Entering training loop at step 44000 to run 2000 steps
I1031 15:19:51.490733 139885496419712 controller.py:32] step: 46000 steps_per_second: 0.62 {'total_loss': 1.2127715, 'cls_loss': 0.56231374, 'box_loss': 0.0051487056, 'model_loss': 0.81974953, 'training_loss': 1.2127715, 'learning_rate': 0.0175}
I1031 15:19:51.501270 139885496419712 controller.py:201] Running 1564 steps of evaluation at train step: 46000
I1031 15:49:52.190389 139885496419712 coco_evaluator.py:128] There is no annotation_file in COCOEvaluator.
I1031 15:52:00.247564 139885496419712 controller.py:32] step: 46000 evaluation metric: {'total_loss': 1.220416, 'cls_loss': 0.57451785, 'box_loss': 0.005251268, 'model_loss': 0.83708125, 'validation_loss': 1.220416, 'AP': 0.14968255, 'AP50': 0.2530588, 'AP75': 0.15353745, 'APs': 0.033541586, 'APm': 0.13266043, 'APl': 0.22829902, 'ARmax1': 0.19840653, 'ARmax10': 0.31818593, 'ARmax100': 0.3341818, 'ARs': 0.08637451, 'ARm': 0.31760666, 'ARl': 0.49106246}
I1031 15:52:00.282764 139885496419712 controller.py:167] Train at step 46000 of 48000
I1031 15:52:00.283312 139885496419712 controller.py:334] Entering training loop at step 46000 to run 2000 steps
I1031 16:13:34.921621 139885496419712 controller.py:32] step: 48000 steps_per_second: 0.62 {'total_loss': 1.1860626, 'cls_loss': 0.557108, 'box_loss': 0.0050959033, 'model_loss': 0.8119028, 'training_loss': 1.1860626, 'learning_rate': 0.0175}
I1031 16:13:34.932175 139885496419712 controller.py:201] Running 1564 steps of evaluation at train step: 48000
I1031 16:43:35.332994 139885496419712 coco_evaluator.py:128] There is no annotation_file in COCOEvaluator.
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.081
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.315
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.447
step: 38000 evaluation metric: {'total_loss': 1.3514221, 'cls_loss': 0.61072814, 'box_loss': 0.00548145, 'model_loss': 0.8848008, 'validation_loss': 1.3514221, 'AP': 0.12941317, 'AP50': 0.22522616, 'AP75': 0.13184388, 'APs': 0.028182765, 'APm': 0.1135972, 'APl': 0.19637726, 'ARmax1': 0.18401565, 'ARmax10': 0.29843047, 'ARmax100': 0.31292313, 'ARs': 0.08129782, 'ARm': 0.3145205, 'ARl': 0.44674942}
step: 40000 steps_per_second: 0.62 {'total_loss': 1.2835882, 'cls_loss': 0.56903994, 'box_loss': 0.005200124, 'model_loss': 0.82904565, 'training_loss': 1.2835882, 'learning_rate': 0.0175}
creating index...
index created!
creating index...
index created!
Running per image evaluation...
Evaluate annotation type *bbox*
DONE (t=96.40s).
Accumulating evaluation results...
DONE (t=13.63s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.139
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.242
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.142
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.032
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.117
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.220
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.198
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.319
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.336
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.091
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.316
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.486
step: 40000 evaluation metric: {'total_loss': 1.2893002, 'cls_loss': 0.5780909, 'box_loss': 0.0053695966, 'model_loss': 0.8465711, 'validation_loss': 1.2893002, 'AP': 0.1392194, 'AP50': 0.24242955, 'AP75': 0.14185813, 'APs': 0.03170243, 'APm': 0.11697013, 'APl': 0.21997045, 'ARmax1': 0.19765268, 'ARmax10': 0.3194477, 'ARmax100': 0.3355437, 'ARs': 0.09075803, 'ARm': 0.3163557, 'ARl': 0.4858842}
step: 42000 steps_per_second: 0.62 {'total_loss': 1.2697237, 'cls_loss': 0.57452255, 'box_loss': 0.0052705845, 'model_loss': 0.8380518, 'training_loss': 1.2697237, 'learning_rate': 0.0175}
creating index...
index created!
creating index...
index created!
Running per image evaluation...
Evaluate annotation type *bbox*
DONE (t=96.45s).
Accumulating evaluation results...
DONE (t=14.10s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.063
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.111
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.064
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.011
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.059
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.100
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.122
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.193
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.199
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.028
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.176
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.322
step: 42000 evaluation metric: {'total_loss': 1.5447754, 'cls_loss': 0.785464, 'box_loss': 0.006719882, 'model_loss': 1.121458, 'validation_loss': 1.5447754, 'AP': 0.062637836, 'AP50': 0.11126457, 'AP75': 0.06359231, 'APs': 0.011471858, 'APm': 0.05856425, 'APl': 0.09969066, 'ARmax1': 0.12162978, 'ARmax10': 0.19260113, 'ARmax100': 0.19863887, 'ARs': 0.027858548, 'ARm': 0.17577986, 'ARl': 0.3217958}
step: 44000 steps_per_second: 0.62 {'total_loss': 1.2505301, 'cls_loss': 0.5742789, 'box_loss': 0.0052680196, 'model_loss': 0.83768034, 'training_loss': 1.2505301, 'learning_rate': 0.0175}
creating index...
index created!
creating index...
index created!
Running per image evaluation...
Evaluate annotation type *bbox*
DONE (t=93.86s).
Accumulating evaluation results...
DONE (t=13.11s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.096
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.174
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.094
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.022
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.093
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.144
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.160
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.263
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.274
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.060
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.279
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.384
step: 44000 evaluation metric: {'total_loss': 1.3667822, 'cls_loss': 0.6602397, 'box_loss': 0.006074844, 'model_loss': 0.9639824, 'validation_loss': 1.3667822, 'AP': 0.09594054, 'AP50': 0.17444487, 'AP75': 0.094005845, 'APs': 0.021597486, 'APm': 0.093030944, 'APl': 0.14369863, 'ARmax1': 0.15980989, 'ARmax10': 0.26280856, 'ARmax100': 0.27431372, 'ARs': 0.059566103, 'ARm': 0.2790743, 'ARl': 0.38445896}
step: 46000 steps_per_second: 0.62 {'total_loss': 1.2127715, 'cls_loss': 0.56231374, 'box_loss': 0.0051487056, 'model_loss': 0.81974953, 'training_loss': 1.2127715, 'learning_rate': 0.0175}
creating index...
index created!
creating index...
index created!
Running per image evaluation...
Evaluate annotation type *bbox*
DONE (t=89.58s).
Accumulating evaluation results...
DONE (t=12.69s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.150
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.253
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.154
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.034
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.133
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.228
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.198
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.318
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.334
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.086
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.318
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.491
step: 46000 evaluation metric: {'total_loss': 1.220416, 'cls_loss': 0.57451785, 'box_loss': 0.005251268, 'model_loss': 0.83708125, 'validation_loss': 1.220416, 'AP': 0.14968255, 'AP50': 0.2530588, 'AP75': 0.15353745, 'APs': 0.033541586, 'APm': 0.13266043, 'APl': 0.22829902, 'ARmax1': 0.19840653, 'ARmax10': 0.31818593, 'ARmax100': 0.3341818, 'ARs': 0.08637451, 'ARm': 0.31760666, 'ARl': 0.49106246}
step: 48000 steps_per_second: 0.62 {'total_loss': 1.1860626, 'cls_loss': 0.557108, 'box_loss': 0.0050959033, 'model_loss': 0.8119028, 'training_loss': 1.1860626, 'learning_rate': 0.0175}
creating index...
index created!
creating index...
index created!
Running per image evaluation...
Evaluate annotation type *bbox*
DONE (t=94.08s).
Accumulating evaluation results...
DONE (t=13.05s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.156
I1031 16:45:49.831901 139885496419712 controller.py:32] step: 48000 evaluation metric: {'total_loss': 1.190438, 'cls_loss': 0.5644366, 'box_loss': 0.0052165976, 'model_loss': 0.8252664, 'validation_loss': 1.190438, 'AP': 0.15589666, 'AP50': 0.2666446, 'AP75': 0.1579226, 'APs': 0.032910142, 'APm': 0.13874066, 'APl': 0.22911705, 'ARmax1': 0.20407797, 'ARmax10': 0.32992476, 'ARmax100': 0.34607226, 'ARs': 0.10615714, 'ARm': 0.3494303, 'ARl': 0.48322043}
I1031 16:45:49.869277 139885496419712 controller.py:167] Train at step 48000 of 50000
I1031 16:45:49.869823 139885496419712 controller.py:334] Entering training loop at step 48000 to run 2000 steps
I1031 17:07:24.221114 139885496419712 controller.py:32] step: 50000 steps_per_second: 0.62 {'total_loss': 1.1622236, 'cls_loss': 0.55269384, 'box_loss': 0.005057054, 'model_loss': 0.8055469, 'training_loss': 1.1622236, 'learning_rate': 0.0175}
I1031 17:07:25.054098 139885496419712 controller.py:381] Saved checkpoints in training_dir/ckpt-50000
I1031 17:07:25.055474 139885496419712 controller.py:201] Running 1564 steps of evaluation at train step: 50000
I1031 17:37:31.005617 139885496419712 coco_evaluator.py:128] There is no annotation_file in COCOEvaluator.
I1031 17:39:58.140462 139885496419712 controller.py:32] step: 50000 evaluation metric: {'total_loss': 1.182763, 'cls_loss': 0.5719879, 'box_loss': 0.0052478183, 'model_loss': 0.83437854, 'validation_loss': 1.182763, 'AP': 0.15345125, 'AP50': 0.25885797, 'AP75': 0.1577261, 'APs': 0.03554234, 'APm': 0.1326204, 'APl': 0.23334704, 'ARmax1': 0.20183603, 'ARmax10': 0.3270075, 'ARmax100': 0.34437168, 'ARs': 0.10711023, 'ARm': 0.34062216, 'ARl': 0.49045214}
I1031 17:39:58.175596 139885496419712 controller.py:167] Train at step 50000 of 52000
I1031 17:39:58.176388 139885496419712 controller.py:334] Entering training loop at step 50000 to run 2000 steps
I1031 18:02:00.363424 139885496419712 controller.py:32] step: 52000 steps_per_second: 0.61 {'total_loss': 1.1411799, 'cls_loss': 0.5503363, 'box_loss': 0.0050072405, 'model_loss': 0.8006991, 'training_loss': 1.1411799, 'learning_rate': 0.0175}
I1031 18:02:00.402074 139885496419712 controller.py:201] Running 1564 steps of evaluation at train step: 52000
I1031 18:32:04.887839 139885496419712 coco_evaluator.py:128] There is no annotation_file in COCOEvaluator.
I1031 18:34:26.981041 139885496419712 controller.py:32] step: 52000 evaluation metric: {'total_loss': 1.1437737, 'cls_loss': 0.5545166, 'box_loss': 0.0051303557, 'model_loss': 0.8110344, 'validation_loss': 1.1437737, 'AP': 0.16220997, 'AP50': 0.2706167, 'AP75': 0.16882622, 'APs': 0.03653328, 'APm': 0.13979578, 'APl': 0.24624981, 'ARmax1': 0.20707768, 'ARmax10': 0.33736664, 'ARmax100': 0.35503754, 'ARs': 0.09993061, 'ARm': 0.35223114, 'ARl': 0.5025141}
I1031 18:34:27.030765 139885496419712 controller.py:167] Train at step 52000 of 54000
I1031 18:34:27.031316 139885496419712 controller.py:334] Entering training loop at step 52000 to run 2000 steps
I1031 18:56:05.414261 139885496419712 controller.py:32] step: 54000 steps_per_second: 0.62 {'total_loss': 1.1175134, 'cls_loss': 0.5445361, 'box_loss': 0.00495084, 'model_loss': 0.79207766, 'training_loss': 1.1175134, 'learning_rate': 0.0175}
I1031 18:56:05.438895 139885496419712 controller.py:201] Running 1564 steps of evaluation at train step: 54000
I1031 19:26:06.722558 139885496419712 coco_evaluator.py:128] There is no annotation_file in COCOEvaluator.
I1031 19:28:20.350446 139885496419712 controller.py:32] step: 54000 evaluation metric: {'total_loss': 1.1411448, 'cls_loss': 0.566344, 'box_loss': 0.005129792, 'model_loss': 0.8228335, 'validation_loss': 1.1411448, 'AP': 0.15545042, 'AP50': 0.2636171, 'AP75': 0.15803272, 'APs': 0.028823914, 'APm': 0.13085106, 'APl': 0.23903799, 'ARmax1': 0.20254348, 'ARmax10': 0.32471123, 'ARmax100': 0.34092107, 'ARs': 0.088891774, 'ARm': 0.32869858, 'ARl': 0.49599567}
I1031 19:28:20.382083 139885496419712 controller.py:167] Train at step 54000 of 56000
I1031 19:28:20.382549 139885496419712 controller.py:334] Entering training loop at step 54000 to run 2000 steps
...
@@ -19,4 +19,4 @@ from official.modeling.activations.sigmoid import hard_sigmoid
 from official.modeling.activations.swish import hard_swish
 from official.modeling.activations.swish import identity
 from official.modeling.activations.swish import simple_swish
\ No newline at end of file
...
@@ -15,9 +15,7 @@
 """All necessary imports for registration."""
 # pylint: disable=unused-import
-from official.nlp import tasks as nlp_task
-from official.utils.testing import mock_task
-from official.vision import beta
+from official.common import registry_imports
 from official.vision.beta.projects import yolo
 from official.vision.beta.projects.yolo.modeling.backbones import darknet
...
...
@@ -20,7 +20,7 @@ from official.core import input_reader
 from official.core import task_factory
 from official.modeling import tf_utils
 from official.vision.beta.projects.yolo.configs import darknet_classification as exp_cfg
-from official.vision.beta.projects.yolo.dataloaders import classification_input as cli
+from official.vision.beta.projects.yolo.dataloaders import classification_tfds_decoder as cli
 from official.vision.beta.dataloaders import classification_input
 from official.vision.beta.modeling import factory
 from official.vision.beta.tasks import image_classification
...
...
@@ -35,8 +35,6 @@ FLAGS = flags.FLAGS
 def main(_):
   gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
   params = train_utils.parse_configuration(FLAGS)
-  import pprint
-  pprint.pprint(params.as_dict())
   model_dir = FLAGS.model_dir
   if 'train' in FLAGS.mode:
     # Pure eval modes do not output yaml files. Otherwise continuous eval job
...
...
@@ -81,6 +81,7 @@ def make_distributed_dataset(strategy, dataset_or_fn, *args, **kwargs):
    if "input_context" in arg_names:
      kwargs["input_context"] = input_context
    return dataset_or_fn(*args, **kwargs)
  return strategy.distribute_datasets_from_function(dataset_fn)
...
runtime:
distribution_strategy: 'mirrored'
mixed_precision_dtype: 'float32'
loss_scale: 'dynamic'
num_gpus: 1
task:
init_checkpoint: Null
model:
num_classes: 80
input_size: [640, 640, 3]
min_level: 3
max_level: 7
losses:
l2_weight_decay: 0.0001
train_data:
input_path: Null
tfds_name: 'coco/2017'
tfds_split: 'train'
tfds_download: True
is_training: True
global_batch_size: 2
dtype: 'float16'
cycle_length: 5
decoder:
type: tfds_decoder
shuffle_buffer_size: 2
validation_data:
input_path: Null
tfds_name: 'coco/2017'
tfds_split: 'validation'
tfds_download: True
# tfds_skip_decoding_feature: source_id,image,height,width,groundtruth_classes,groundtruth_is_crowd,groundtruth_area,groundtruth_boxes
is_training: False
global_batch_size: 2
dtype: 'float16'
cycle_length: 10
decoder:
type: tfds_decoder
shuffle_buffer_size: 2
trainer:
train_steps: 4257792
validation_steps: 2500
validation_interval: 5000
steps_per_loop: 100 #59136
summary_interval: 100 #59136
checkpoint_interval: 59136
optimizer_config:
optimizer:
type: 'sgd'
sgd:
momentum: 0.9
# learning_rate:
# type: 'cosine'
# cosine:
# initial_learning_rate: 0.0021875
# decay_steps: 4257792
# alpha: 0.01
# Stepwise version
learning_rate:
type: 'stepwise'
stepwise:
# boundaries: [26334, 30954]
boundaries: [3370752, 3962112]
# values: [0.28, 0.028, 0.0028]
values: [0.0021875, 0.00021875, 0.000021875]
warmup:
type: 'linear'
linear:
warmup_steps: 64000
warmup_learning_rate: 0.0000523
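# A minimal sketch (plain Python, ours; not part of the training code) of the
# learning-rate schedule the trainer block above describes: linear warmup from
# warmup_learning_rate over warmup_steps, then stepwise decay at the listed
# boundaries.
def learning_rate_at(step,
                     boundaries=(3370752, 3962112),
                     values=(0.0021875, 0.00021875, 0.000021875),
                     warmup_steps=64000,
                     warmup_learning_rate=0.0000523):
  if step < warmup_steps:
    # Interpolate linearly toward the post-warmup base rate.
    return warmup_learning_rate + (
        values[0] - warmup_learning_rate) * step / warmup_steps
  for boundary, value in zip(boundaries, values):
    if step < boundary:
      return value
  return values[-1]
# learning_rate_at(0) ~= 5.23e-05; learning_rate_at(1_000_000) == 0.0021875;
# learning_rate_at(4_000_000) == 2.1875e-05.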
import tensorflow_datasets as tfds
import tensorflow as tf
from official.vision.beta.dataloaders import decoder
import matplotlib.pyplot as plt
import cv2
class TfdsExampleDecoder(decoder.Decoder):
"""Tensorflow Dataset Example proto decoder."""
def __init__(self,
include_mask=False,
regenerate_source_id=False):
self._include_mask = include_mask
self._regenerate_source_id = regenerate_source_id
def decode(self, serialized_example):
"""Decode the serialized example.
Args:
serialized_example: a single serialized tf.Example string.
Returns:
decoded_tensors: a dictionary of tensors with the following fields:
- source_id: a string scalar tensor.
- image: a uint8 tensor of shape [None, None, 3].
- height: an integer scalar tensor.
- width: an integer scalar tensor.
- groundtruth_classes: a int64 tensor of shape [None].
- groundtruth_is_crowd: a bool tensor of shape [None].
- groundtruth_area: a float32 tensor of shape [None].
- groundtruth_boxes: a float32 tensor of shape [None, 4].
- groundtruth_instance_masks: a float32 tensor of shape
[None, None, None].
- groundtruth_instance_masks_png: a string tensor of shape [None].
"""
decoded_tensors = {
'source_id': serialized_example['image/id'],
'image': serialized_example['image'],
'height': tf.shape(serialized_example['image'])[0],
'width': tf.shape(serialized_example['image'])[1],
'groundtruth_classes': serialized_example['objects']['label'],
'groundtruth_is_crowd': serialized_example['objects']['is_crowd'],
'groundtruth_area': serialized_example['objects']['area'],
'groundtruth_boxes': serialized_example['objects']['bbox'],
}
return decoded_tensors
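# A minimal usage sketch (ours; assumes the coco/2017 TFDS dataset is
# available). The decoder consumes the feature dictionaries that tfds.load
# yields, not serialized tf.Example strings.
if __name__ == '__main__':
  ds = tfds.load('coco/2017', split='validation')
  tfds_decoder = TfdsExampleDecoder()
  for example in ds.take(1):
    decoded = tfds_decoder.decode(example)
    print(decoded['source_id'].numpy(), decoded['height'].numpy(),
          decoded['width'].numpy(), decoded['groundtruth_boxes'].shape)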
runtime:
all_reduce_alg: null
batchnorm_spatial_persistent: false
dataset_num_private_threads: null
default_shard_dim: -1
distribution_strategy: mirrored
enable_xla: false
gpu_thread_mode: null
loss_scale: dynamic
mixed_precision_dtype: float16
num_cores_per_replica: 1
num_gpus: 2
num_packs: 1
per_gpu_thread_count: 0
run_eagerly: false
task_index: -1
tpu: null
worker_hosts: null
task:
gradient_clip_norm: 0.0
init_checkpoint: ''
logging_dir: null
losses:
l2_weight_decay: 0.0005
label_smoothing: 0.0
one_hot: true
model:
add_head_batch_norm: false
backbone:
darknet:
model_id: cspdarknet53
type: darknet
dropout_rate: 0.0
input_size: [256, 256, 3]
norm_activation:
activation: mish
norm_epsilon: 0.001
norm_momentum: 0.99
use_sync_bn: false
num_classes: 1001
train_data:
block_length: 1
cache: false
cycle_length: 10
deterministic: null
drop_remainder: true
dtype: float16
enable_tf_data_service: false
global_batch_size: 16
input_path: ''
is_training: true
sharding: true
shuffle_buffer_size: 100
tf_data_service_address: null
tf_data_service_job_name: null
tfds_as_supervised: false
tfds_data_dir: ~/tensorflow_datasets/
tfds_download: true
tfds_name: imagenet2012
tfds_skip_decoding_feature: ''
tfds_split: train
validation_data:
block_length: 1
cache: false
cycle_length: 10
deterministic: null
drop_remainder: false
dtype: float16
enable_tf_data_service: false
global_batch_size: 16
input_path: ''
is_training: true
sharding: true
shuffle_buffer_size: 100
tf_data_service_address: null
tf_data_service_job_name: null
tfds_as_supervised: false
tfds_data_dir: ~/tensorflow_datasets/
tfds_download: true
tfds_name: imagenet2012
tfds_skip_decoding_feature: ''
tfds_split: validation
trainer:
allow_tpu_summary: false
best_checkpoint_eval_metric: ''
best_checkpoint_export_subdir: ''
best_checkpoint_metric_comp: higher
checkpoint_interval: 10000
continuous_eval_timeout: 3600
eval_tf_function: true
max_to_keep: 5
optimizer_config:
ema: null
learning_rate:
polynomial:
cycle: false
decay_steps: 9592000
end_learning_rate: 1.25e-05
initial_learning_rate: 0.0125
name: PolynomialDecay
power: 4.0
type: polynomial
optimizer:
sgd:
clipnorm: null
clipvalue: null
decay: 0.0
momentum: 0.9
name: SGD
nesterov: false
type: sgd
warmup:
linear:
name: linear
warmup_learning_rate: 0
warmup_steps: 8000
type: linear
steps_per_loop: 10000
summary_interval: 10000
train_steps: 9600000
train_tf_function: true
train_tf_while_loop: true
validation_interval: 10000
validation_steps: 3200
import collections
import collections.abc
import io
from ..file_manager import PathABC
from typing import Union, Type, TypeVar
T = TypeVar('T', bound='DarkNetModel')
class _DarkNetSectionList(collections.abc.MutableSequence):
__slots__ = ['data']
def __init__(self, initlist=None):
self.data = []
if initlist is not None:
self.data = list(initlist)
@property
def net(self):
return self.data[0]
# Overriding Python list operations
def __len__(self):
# The [net] section at data[0] is configuration, not a layer.
return max(0, len(self.data) - 1)
def __getitem__(self, i):
# Check for slices before the integer comparison below (a slice cannot
# be ordered against 0), then shift non-negative indices past [net] so
# that index 0 is the first real layer.
if isinstance(i, slice):
return self.__class__(self.data[i])
if i >= 0:
i += 1
return self.data[i]
def __setitem__(self, i, item):
if i >= 0:
i += 1
self.data[i] = item
def __delitem__(self, i):
if i >= 0:
i += 1
del self.data[i]
def insert(self, i, item):
if i >= 0:
i += 1
self.data.insert(i, item)
class DarkNetConverter(_DarkNetSectionList):
"""
This is a special list-like object to handle the storage of layers in a
model that is defined in the DarkNet format. Note that indexing layers in a
DarkNet model can be unintuitive and doesn't follow the same conventions
as a Python list.
In DarkNet, a [net] section is at the top of every model definition. This
section defines the input and training parameters for the entire model.
As such, it is not a layer and cannot be referenced directly. For our
convenience, we allowed relative references to [net] but disallowed absolute
ones. Like the DarkNet implementation, our implementation numbers the first
layer (after [net]) with a 0, so [net] itself can only be reached through
the net property or a negative (relative) index.
To use conventional list operations on the DarkNetConverter object, use the
data property provided by this class.
"""
@classmethod
def read(
clz: Type[T],
config_file: Union[PathABC, io.TextIOBase],
weights_file: Union[PathABC, io.RawIOBase,
io.BufferedIOBase] = None) -> T:
"""
Parse the config and weights files and read the DarkNet layer's encoder,
decoder, and output layers. The number of bytes in the file is also returned.
Args:
config_file: str, path to yolo config file from Darknet
weights_file: str, path to yolo weights file from Darknet
Returns:
a DarkNetConverter object
"""
from .read_weights import read_weights
full_net = clz()
read_weights(full_net, config_file, weights_file)
return full_net
def to_tf(self,
thresh=0.45,
class_thresh=0.45,
max_boxes=200,
use_mixed=True):
import tensorflow as tf
tensors = _DarkNetSectionList()
layers = _DarkNetSectionList()
yolo_tensors = []
for i, cfg in enumerate(self.data):
tensor = cfg.to_tf(tensors)
# Handle weighted layers
if type(tensor) is tuple:
tensor, layer = tensor
else:
layer = None
assert tensor.shape[1:] == cfg.shape, str(
cfg
) + f" shape inconsistent\n\tExpected: {cfg.shape}\n\tGot: {tensor.shape[1:]}"
if cfg._type == 'yolo':
yolo_tensors.append((i, cfg, tensor))
tensors.append(tensor)
layers.append(layer)
model = tf.keras.Model(inputs=tensors.net,
outputs=self._process_yolo_layer(
yolo_tensors,
thresh=thresh,
class_thresh=class_thresh,
max_boxes=max_boxes,
use_mixed=use_mixed))
model.build(self.net.shape)
for cfg, layer in zip(self, layers):
if layer is not None:
layer.set_weights(cfg.get_weights())
return model
def _process_yolo_layer(self,
yolo_tensors,
thresh=0.45,
class_thresh=0.45,
max_boxes=200,
use_mixed=True):
import tensorflow as tf
from yolo.modeling.building_blocks import YoloLayer
if use_mixed:
from tensorflow.keras.mixed_precision import experimental as mixed_precision
# using mixed type policy give better performance than strictly float32
policy = mixed_precision.Policy('mixed_float16')
mixed_precision.set_policy(policy)
dtype = policy.compute_dtype
else:
dtype = tf.float32
outs = collections.OrderedDict()
masks = {}
anchors = None
scale_x_y = None  # filled in from the first [yolo] section below
path_scales = {}
for i, yolo_cfg, yolo_tensor in yolo_tensors:
masks[yolo_tensor.name] = yolo_cfg.mask
if anchors is None:
anchors = yolo_cfg.anchors
elif anchors != yolo_cfg.anchors:
raise ValueError('Anchors inconsistent in [yolo] layers')
if scale_x_y is None:
scale_x_y = yolo_cfg.scale_x_y
elif scale_x_y != yolo_cfg.scale_x_y:
raise ValueError('Scale inconsistent in [yolo] layers')
outs[yolo_tensor.name] = yolo_tensor
path_scales[yolo_tensor.name] = self.data[i - 1].c >> 5
yolo_layer = YoloLayer(
masks=masks,
anchors=anchors,
thresh=thresh,
cls_thresh=class_thresh,
max_boxes=max_boxes,
dtype=dtype,
#scale_boxes=self.net.w,
scale_xy=scale_x_y,
path_scale=path_scales)
return yolo_layer(outs)
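# A minimal usage sketch (ours; the file names are placeholders, not bundled
# assets): parse a Darknet definition plus weights and emit a Keras model.
if __name__ == '__main__':
  converter = DarkNetConverter.read('yolov3.cfg', 'yolov3.weights')
  model = converter.to_tf(thresh=0.45, class_thresh=0.45, max_boxes=200)
  # converter[0] is the first real layer; [net] is only reachable through
  # converter.net or a negative (relative) index.
  print(len(converter), converter.net)
  model.summary()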
#!/usr/bin/env python3
"Convert a DarkNet config file and weights into a TensorFlow model"
from absl import flags as _flags
from absl.flags import argparse_flags as _argparse_flags
import argparse as _argparse
_flags.DEFINE_boolean('weights_only', False,
'Save only the weights and not the entire model.')
from . import DarkNetConverter
def _makeParser(parser):
parser.add_argument('cfg',
default=None,
help='name of the config file. Defaults to YOLOv3',
type=_argparse.FileType('r'),
nargs='?')
parser.add_argument('weights',
default=None,
help='name of the weights file. Defaults to YOLOv3',
type=_argparse.FileType('rb'),
nargs='?')
parser.add_argument(
'output', help='name of the location to save the generated model')
def main(argv, args=None):
from ..file_manager import download
import os
if args is None:
args = _parser.parse_args(argv[1:])
cfg = args.cfg
weights = args.weights
output = args.output
if cfg is None:
cfg = download('yolov3.cfg')
if weights is None:
weights = download('yolov3.weights')
model = DarkNetConverter.read(cfg, weights).to_tf()
if output != os.devnull:
if _flags.FLAGS.weights_only:  # the flags module is imported above as _flags
model.save_weights(output)
else:
model.save(output)
_parser = _argparse_flags.ArgumentParser()
_makeParser(_parser)
from absl import app
import sys
from . import main, _parser
if __name__ == '__main__':
# I dislike Abseil's current help menu. I like the default Python one
# better
if '-h' in sys.argv or '--help' in sys.argv:
_parser.parse_args(sys.argv[1:])
exit()
app.run(main)
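# A sketch (ours; paths are placeholders) of driving the converter without
# absl's app.run(); the flags must be parsed before main() reads
# FLAGS.weights_only:
#
#   from absl import flags
#   flags.FLAGS(['converter'])
#   args = _parser.parse_args(['yolov3.cfg', 'yolov3.weights', 'saved_model'])
#   main(['converter'], args=args)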
"""
This file contains the layers (Config objects) that are used by the Darknet
config file parser.
For more details on the layer types and layer parameters, visit https://github.com/AlexeyAB/darknet/wiki/CFG-Parameters-in-the-different-layers
Currently, the parser is incomplete and we can only guarantee that it works for
models in the YOLO family (YOLOv3 and older).
"""
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
import numpy as np
from typing import Tuple, Sequence, List
class Config(ABC):
"""
The base class for all layers that are used by the parser. Each subclass
defines a new layer type. Most nodes correspond to distinct layers that
appear in the final network. [net] corresponds to the input to the model.
Each subclass must be a @dataclass and must have the following fields:
```{python}
_type: str = None
w: int = field(init=True, repr=True, default=0)
h: int = field(init=True, repr=True, default=0)
c: int = field(init=True, repr=True, default=0)
```
These fields are used when linking different layers together, but weren't
included in the Config class due to limitations in the dataclasses package.
(w, h, c) will correspond to the different input dimensions of a DarkNet
layer: the width, height, and number of channels.
"""
@property
@abstractmethod
def shape(self) -> Tuple[int, int, int]:
'''
Output shape of the layer. The output must be a 3-tuple of ints
corresponding to the width, height, and number of channels of the
output.
Returns:
A tuple corresponding to the output shape of the layer.
'''
return
def load_weights(self, files) -> int:
'''
Load the weights for the current layer from a file.
Arguments:
files: Open IO object for the DarkNet weights file
Returns:
the number of bytes read.
'''
return 0
def get_weights(self) -> list:
'''
Returns:
a list of Numpy arrays consisting of all of the weights that
were loaded from the weights file
'''
return []
@classmethod
def from_dict(clz, net, layer_dict) -> "Config":
'''
Create a layer instance from the previous layer and a dictionary
containing all of the parameters for the DarkNet layer. This is how
linking is done by the parser.
'''
if 'w' not in layer_dict:
prevlayer = net[-1]
l = {
"w": prevlayer.shape[0],
"h": prevlayer.shape[1],
"c": prevlayer.shape[2],
**layer_dict
}
else:
l = layer_dict
return clz(**l)
@abstractmethod
def to_tf(self, tensors):
"""
Convert the DarkNet configuration object to a tensor given the previous
tensors that occurred in the network. This function should also return
a Keras layer if it has weights.
Returns:
if weights: a tuple consisting of the output tensor and Keras layer
if no weights: the output tensor
"""
return None
class _LayerBuilder(dict):
"""
This class defines a registry for the layer builder in the DarkNet weight
parser. It allows for syntactic sugar when registering Config subclasses to
the parser.
"""
def __getitem__(self, key):
try:
return super().__getitem__(key)
except KeyError as e:
raise KeyError(f"Unknown layer type: {key}") from e
def register(self, *layer_types: str):
'''
Register a parser node (layer) class with the layer builder.
'''
def decorator(clz):
for layer_type in layer_types:
self[layer_type] = clz
return clz
return decorator
layer_builder = _LayerBuilder()
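# Registration sketch (ours; 'example' is a hypothetical layer type): one
# decorator maps any number of cfg section names onto a Config subclass, and
# unknown section names surface as a descriptive KeyError on lookup.
#
#   @layer_builder.register('example')
#   @dataclass
#   class exampleCFG(Config):
#       ...
#
#   layer_builder['example']   # -> exampleCFG
#   layer_builder['missing']   # -> KeyError: "Unknown layer type: missing"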
@layer_builder.register('conv', 'convolutional')
@dataclass
class convCFG(Config):
_type: str = None
w: int = field(init=True, repr=True, default=0)
h: int = field(init=True, repr=True, default=0)
c: int = field(init=True, repr=True, default=0)
size: int = field(init=True, repr=True, default=0)
stride: int = field(init=True, repr=True, default=0)
pad: int = field(init=True, repr=True, default=0)
filters: int = field(init=True, repr=True, default=0)
activation: str = field(init=True, repr=False, default='linear')
groups: int = field(init=True, repr=False, default=1)
batch_normalize: int = field(init=True, repr=False, default=0)
dilation: int = field(init=True, repr=False, default=1)
nweights: int = field(repr=False, default=0)
biases: np.array = field(repr=False, default=None) #
weights: np.array = field(repr=False, default=None)
scales: np.array = field(repr=False, default=None)
rolling_mean: np.array = field(repr=False, default=None)
rolling_variance: np.array = field(repr=False, default=None)
def __post_init__(self):
self.pad = (self.size - 1) // 2
self.nweights = int(
(self.c / self.groups) * self.filters * self.size * self.size)
return
@property
def shape(self):
w = len_width(self.w, self.size, self.pad, self.stride)
h = len_width(self.h, self.size, self.pad, self.stride)
return (w, h, self.filters)
def load_weights(self, files):
self.biases = read_n_floats(self.filters, files)
bytes_read = self.filters
if self.batch_normalize == 1:
self.scales = read_n_floats(self.filters, files)
self.rolling_mean = read_n_floats(self.filters, files)
self.rolling_variance = read_n_floats(self.filters, files)
bytes_read += self.filters * 3
# used as a guide:
# https://github.com/thtrieu/darkflow/blob/master/darkflow/dark/convolution.py
weights = read_n_floats(self.nweights, files)
self.weights = weights.reshape(self.filters, self.c, self.size,
self.size).transpose([2, 3, 1, 0])
bytes_read += self.nweights
return bytes_read * 4
def get_weights(self, printing=False):
if printing:
      print("[weights, scales, biases, rolling_mean, rolling_variance]")
if self.batch_normalize:
return [
self.weights,
self.scales, #gamma
self.biases, #beta
self.rolling_mean,
self.rolling_variance
]
else:
return [self.weights, self.biases]
def to_tf(self, tensors):
from official.vision.beta.projects.yolo.modeling.layers.nn_blocks import DarkConv
layer = DarkConv(
filters=self.filters,
kernel_size=(self.size, self.size),
strides=(self.stride, self.stride),
padding='same',
dilation_rate=(self.dilation, self.dilation),
use_bn=bool(self.batch_normalize),
activation=activation_function_dn_to_keras_name(self.activation),
) # TODO: Where does groups go
return layer(tensors[-1]), layer
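# Example (hypothetical values): a 3x3 stride-2 convolution section linked to
# a 416x416x3 input halves the spatial dimensions:
#   cfg = convCFG(_type='convolutional', w=416, h=416, c=3, size=3, stride=2,
#                 filters=32, batch_normalize=1)
#   cfg.shape  # (208, 208, 32)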
@layer_builder.register('local')
@dataclass
class localCFG(Config):
# implementation based on:
# https://github.com/thtrieu/darkflow/blob/master/darkflow/dark/convolution.py
# l.4-l.25
_type: str = None
w: int = field(init=True, repr=True, default=0)
h: int = field(init=True, repr=True, default=0)
c: int = field(init=True, repr=True, default=0)
size: int = field(init=True, repr=True, default=0)
stride: int = field(init=True, repr=True, default=0)
pad: int = field(init=True, repr=True, default=0)
filters: int = field(init=True, repr=True, default=0)
activation: str = field(init=True, repr=False, default='linear')
groups: int = field(init=True, repr=False, default=1)
nweights: int = field(repr=False, default=0)
  weights: np.ndarray = field(repr=False, default=None)
  biases: np.ndarray = field(repr=False, default=None)
  def __post_init__(self):
    self.pad = int(self.pad) * int(self.size / 2) if self.size != 1 else 0
    # DarkNet's local layer keeps a separate filter bank per output location:
    # nweights = c * filters * size * size * out_h * out_w
    # (see darknet's src/local_layer.c)
    out_w = len_width(self.w, self.size, self.pad, self.stride)
    out_h = len_width(self.h, self.size, self.pad, self.stride)
    self.nweights = int(
        self.c * self.filters * self.size * self.size * out_w * out_h)
    return
@property
def shape(self):
w = len_width(self.w, self.size, self.pad, self.stride)
h = len_width(self.h, self.size, self.pad, self.stride)
return (w, h, self.filters)
def load_weights(self, files):
w = len_width(self.w, self.size, self.pad, self.stride)
h = len_width(self.h, self.size, self.pad, self.stride)
self.biases = read_n_floats(w * h * self.filters, files)
bytes_read = self.filters * w * h
    weights = read_n_floats(self.nweights, files)
    # TODO: verify the axis order before enabling this reshape:
    # self.weights = weights.reshape(h * w, self.filters, self.c, self.size,
    #                                self.size).transpose([0, 3, 4, 2, 1])
    bytes_read += self.nweights
return bytes_read * 4
  def get_weights(self, printing=False):
    if printing:
      print("[weights, biases]")
    return [self.weights, self.biases]
  def to_tf(self, tensors):
    from tensorflow.keras.layers import LocallyConnected2D, ZeroPadding2D, LeakyReLU
    zero_pad_layer = ZeroPadding2D(padding=self.pad)
    if self.activation == 'leaky':
      # Keras has no 'leaky' activation string, so pass a callable instead
      # of mutating the config's activation field.
      activation = LeakyReLU(alpha=0.1)
    else:
      activation = activation_function_dn_to_keras_name(self.activation)
    local_layer = LocallyConnected2D(
        filters=self.filters,
        kernel_size=(self.size, self.size),
        strides=(self.stride, self.stride),
        padding='valid',  # LocallyConnected2D currently only supports 'valid'
        activation=activation,
    )
    return local_layer(zero_pad_layer(tensors[-1])), local_layer
@layer_builder.register('shortcut')
@dataclass
class shortcutCFG(Config):
_type: str = None
w: int = field(init=True, default=0)
h: int = field(init=True, default=0)
c: int = field(init=True, default=0)
_from: List[int] = field(init=True, default_factory=list)
activation: str = field(init=True, default='linear')
@property
def shape(self):
return (self.w, self.h, self.c)
@classmethod
def from_dict(clz, net, layer_dict):
'''
Create a layer instance from the previous layer and a dictionary
containing all of the parameters for the DarkNet layer. This is how
linking is done by the parser.
'''
_from = layer_dict['from']
if type(_from) is not tuple:
_from = (_from, )
prevlayer = net[-1]
l = {
"_type": layer_dict['_type'],
"w": prevlayer.shape[0],
"h": prevlayer.shape[1],
"c": prevlayer.shape[2],
"_from": _from,
"activation": layer_dict['activation'],
}
return clz(**l)
def to_tf(self, tensors):
from tensorflow.keras.layers import add
from tensorflow.keras.activations import get
activation = get(activation_function_dn_to_keras_name(self.activation))
my_tensors = [tensors[-1]]
for i in self._from:
my_tensors.append(tensors[i])
return activation(add(my_tensors))
@layer_builder.register('route')
@dataclass
class routeCFG(Config):
_type: str = None
w: int = field(init=True, default=0)
h: int = field(init=True, default=0)
c: int = field(init=True, default=0)
layers: List[int] = field(init=True, default_factory=list)
groups: int = field(repr=False, default=1)
group_id: int = field(repr=False, default=0)
@property
def shape(self):
return (self.w, self.h, self.c // self.groups)
@classmethod
def from_dict(clz, net, layer_dict):
# Calculate shape of the route
layers = layer_dict['layers']
if type(layers) is tuple:
layers_iter = iter(layers)
w, h, c = net[next(layers_iter)].shape
for l in layers_iter:
lw, lh, lc = net[l].shape
        if (lw, lh) != (w, h):
          raise ValueError(
              f"Widths and heights of route layer [#{len(net)}] inputs "
              f"{layers} do not match.\n Previous: {(w, h)}\n New: {(lw, lh)}")
c += lc
else:
w, h, c = net[layers].shape
layers = (layers, )
assert c % layer_dict.get(
'groups', 1
) == 0, "The number of channels must evenly divide among the groups."
# Create layer
l = layer_dict.copy()
l["w"] = w
l["h"] = h
l["c"] = c
l["layers"] = layers
return clz(**l)
def to_tf(self, tensors):
import tensorflow as tf
from tensorflow.keras.layers import concatenate
if len(self.layers) == 1:
stacked = tensors[self.layers[0]]
else:
my_tensors = []
for i in self.layers:
my_tensors.append(tensors[i])
stacked = concatenate(my_tensors)
if self.groups == 1:
return stacked
else:
return tf.split(stacked, self.groups, axis=-1)[self.group_id]
@layer_builder.register('net', 'network')
@dataclass
class netCFG(Config):
_type: str = None
w: int = field(init=True, default=0)
h: int = field(init=True, default=0)
c: int = field(init=True, default=0)
@property
def shape(self):
return (self.w, self.h, self.c)
@classmethod
def from_dict(clz, net, layer_dict):
    assert len(net.data) == 0, (
        "A [net] section cannot occur in the middle of a DarkNet model")
l = {
"_type": layer_dict["_type"],
"w": layer_dict["width"],
"h": layer_dict["height"],
"c": layer_dict["channels"]
}
return clz(**l)
def to_tf(self, tensors):
from tensorflow.keras import Input
return Input(shape=[self.w, self.h, self.c])
@layer_builder.register('yolo')
@dataclass
class yoloCFG(Config):
_type: str = None
w: int = field(init=True, default=0)
h: int = field(init=True, default=0)
c: int = field(init=True, default=0)
mask: List[int] = field(init=True, default_factory=list)
anchors: List[Tuple[int, int]] = field(init=True, default_factory=list)
  scale_x_y: float = field(init=True, default=1.0)
@property
def shape(self):
return (self.w, self.h, self.c)
@classmethod
def from_dict(clz, net, layer_dict):
prevlayer = net[-1]
l = {
"_type": layer_dict['_type'],
"mask": layer_dict['mask'],
"anchors": layer_dict['anchors'],
"w": prevlayer.shape[0],
"h": prevlayer.shape[1],
"c": prevlayer.shape[2]
}
return clz(**l)
def to_tf(self, tensors):
return tensors[-1] # TODO: Fill out
@layer_builder.register('upsample')
@dataclass
class upsampleCFG(Config):
_type: str = None
w: int = field(init=True, default=0)
h: int = field(init=True, default=0)
c: int = field(init=True, default=0)
stride: int = field(init=True, default=2)
@property
def shape(self):
return (self.stride * self.w, self.stride * self.h, self.c)
def to_tf(self, tensors):
from tensorflow.keras.layers import UpSampling2D
return UpSampling2D(size=(self.stride, self.stride))(tensors[-1])
@layer_builder.register('maxpool')
@dataclass
class maxpoolCFG(Config):
_type: str = None
w: int = field(init=True, default=0)
h: int = field(init=True, default=0)
c: int = field(init=True, default=0)
stride: int = field(init=True, default=2)
size: int = field(init=True, default=2)
  @property
  def shape(self):
    # to_tf uses 'same' padding, so spatial dims shrink by the stride only
    return (self.w // self.stride, self.h // self.stride, self.c)
  def to_tf(self, tensors):
    from tensorflow.keras.layers import MaxPooling2D
    return MaxPooling2D(
        pool_size=(self.size, self.size),
        strides=(self.stride, self.stride),
        padding='same')(tensors[-1])
@layer_builder.register('connected')
@dataclass
class connectedCFG(Config):
# Used as guide: https://github.com/thtrieu/darkflow/blob/master/darkflow/dark/connected.py
_type: str = None
w: int = field(init=True, repr=True, default=0)
h: int = field(init=True, repr=True, default=0)
c: int = field(init=True, repr=True, default=0)
output: int = field(init=True, repr=True, default=1715)
activation: str = field(init=True, repr=False, default='linear')
nweights: int = field(repr=False, default=0)
  biases: np.ndarray = field(repr=False, default=None)
  weights: np.ndarray = field(repr=False, default=None)
  def __post_init__(self):
    # number of weights (excluding biases) = input size * output size
    self.nweights = int(self.c * self.w * self.h * self.output)
    return
@property
def shape(self):
return (self.output,)
def load_weights(self, files):
self.biases = read_n_floats(self.output, files)
bytes_read = self.output
    weights = read_n_floats(self.nweights, files)
    # TODO: verify the (input, output) ordering before enabling this reshape:
    # self.weights = weights.reshape(self.c * self.w * self.h, self.output)
    bytes_read += self.nweights
return bytes_read * 4
def get_weights(self, printing=False):
if printing:
print("[weights, biases]")
return [self.weights, self.biases]
def to_tf(self, tensors):
from tensorflow.keras.layers import Dense, Flatten
layer1 = Flatten()
layer2 = Dense(
self.output,
activation=activation_function_dn_to_keras_name(self.activation)
)
return layer2(layer1(tensors[-1])), layer2
@layer_builder.register('detection')
@dataclass
class detectionCFG(Config):
_type: str = None
w: int = field(init=True, repr=True, default=0)
h: int = field(init=True, repr=True, default=0)
c: int = field(init=True, repr=True, default=0)
classes: int = field(init=True, repr=True, default=20)
coords: int = field(init=True, repr=True, default=4)
rescore: int = field(init=True, repr=True, default=1)
side: int = field(init=True, repr=True, default=7)
num: int = field(init=True, repr=True, default=3)
softmax: int = field(init=True, repr=True, default=0)
sqrt: int = field(init=True, repr=True, default=1)
jitter: float = field(init=True, repr=True, default=0.2)
  object_scale: float = field(init=True, repr=True, default=1.0)
  noobject_scale: float = field(init=True, repr=True, default=0.5)
  class_scale: float = field(init=True, repr=True, default=1.0)
  coord_scale: float = field(init=True, repr=True, default=5.0)
  @classmethod
  def from_dict(clz, net, layer_dict):
    # The previous layer is typically [connected], whose shape is a 1-tuple,
    # so only 'w' can be inherited from it here.
    if 'w' not in layer_dict:
      prevlayer = net[-1]
      l = {"w": prevlayer.shape[0], **layer_dict}
    else:
      l = layer_dict
    return clz(**l)
@property
def shape(self):
return (self.side, self.side, self.num * 5 + self.classes)
def to_tf(self, tensors):
from tensorflow.keras.layers import Reshape
shape = (self.side, self.side, self.num * 5 + self.classes)
layer = Reshape(shape)
return layer(tensors[-1])
@layer_builder.register('dropout')
@dataclass
class dropoutCFG(Config):
_type: str = None
  w: int = field(init=True, default=0)
  h: int = field(init=True, default=0)
  c: int = field(init=True, default=0)
  probability: float = field(init=True, default=0.5)
@property
def shape(self):
return (self.w, self.h, self.c)
def to_tf(self, tensors):
from tensorflow.keras.layers import Dropout
dropout = Dropout(rate=self.probability)
return dropout(tensors[-1])
def len_width(n, f, p, s):
  '''
  Output length of a convolution along one spatial axis.
  n: input height or width
  f: kernel height or width
  p: padding
  s: stride
  '''
  return int(((n + 2 * p - f) / s) + 1)
def len_width_up(n, f, p, s):
  '''
  Output length of a transposed convolution (upsampling) along one axis.
  n: input height or width
  f: kernel height or width
  p: padding
  s: stride
  '''
  return int(((n - 1) * s - 2 * p + (f - 1)) + 1)
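# Worked example for the two formulas above (hypothetical values): a 3x3
# kernel with padding 1 and stride 2 over a 640-wide input gives
# len_width(640, 3, 1, 2) == 320, and the transposed counterpart gives
# len_width_up(320, 3, 1, 2) == 639.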
def read_n_floats(n, bfile):
  """Read n little-endian 32-bit floats from a binary file, C-style."""
  return np.fromfile(bfile, '<f4', n)
def read_n_int(n, bfile, unsigned=False):
  """Read n little-endian 32-bit ints from a binary file, C-style."""
  dtype = '<u4' if unsigned else '<i4'
  return np.fromfile(bfile, dtype, n)
def read_n_long(n, bfile, unsigned=False):
  """Read n little-endian 64-bit ints from a binary file, C-style."""
  dtype = '<u8' if unsigned else '<i8'
  return np.fromfile(bfile, dtype, n)
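# These readers exist because a DarkNet weights file starts with a header of
# three 32-bit ints (major, minor, revision) followed by a 64-bit "seen"
# counter in recent format versions. A minimal sketch (file name
# hypothetical):
#   with open('yolov3.weights', 'rb') as f:
#     major, minor, revision = read_n_int(3, f)
#     seen = read_n_long(1, f)[0]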
def activation_function_dn_to_keras_name(dn):
  """Map a DarkNet activation name to its Keras equivalent.

  Currently an identity mapping: the DarkNet names used here ('linear',
  'relu', ...) already match Keras activation names.
  """
  return dn
def get_primitive_tf_layer_name(var, piece=3):
  """Recover (counter ids, layer class name) from a TF variable name."""
  name = var.name
  parts = name.rsplit('/', piece)
  if len(parts) < piece:
    return None
  token = parts[-piece]
  cid = []
  # Strip trailing numeric suffixes like '_3', collecting them as counter ids.
  while True:
    try:
      name, count = token.rsplit('_', 1)
      cid.append(int(count))
    except ValueError:
      break
    token = name
  if token[0].upper() == token[0]:
    return cid, token
  return cid, ''.join(x.capitalize() for x in token.split('_'))
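# Example (hypothetical variable name): for a variable named
# 'model/dark_conv_3/conv2d_7/kernel:0', the token examined is 'dark_conv_3',
# so the function returns ([3], 'DarkConv').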
# Testing locally connected config:
if __name__ == "__main__":
config_path = "yolo/utils/_darknet2tf/test_locally_connected_config.cfg"
weights_path = "D:/yolov1.weights"
from yolo.utils import DarkNetConverter
converter = DarkNetConverter()
x = converter.read(config_file=config_path, weights_file=weights_path)
print("Weights loaded successfully")
x = x.to_tf()
print("Layers converted to TF successfully")
"Convert a DarkNet config file into a Python literal file in a list of dictionaries format"
import collections
import configparser
import io
import sys
from typing import Dict, List
if sys.version_info < (3, 10):
  # Shim for Python 3.9 and older: the built-in zip() only gained the
  # `strict` keyword in Python 3.10.
  import builtins
  from more_itertools import zip_equal
  def zip(*iterables, strict=False):
    if strict:
      return zip_equal(*iterables)
    return builtins.zip(*iterables)
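# With the shim in place, zip() behaves like the Python 3.10+ built-in:
#   list(zip([1, 2], ['a', 'b'], strict=True))  # [(1, 'a'), (2, 'b')]
#   list(zip([1, 2], ['a'], strict=True))       # raises on length mismatch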
def _parseValue(key, val):
"""
Parse non-string literals found in darknet config files
"""
if ',' in val:
vals = val.split(',')
raw_list = tuple(_parseValue(key, v) for v in vals)
if key == 'anchors':
# Group the anchors list into pairs
# https://docs.python.org/3.10/library/functions.html#zip
raw_list = list(zip(*[iter(raw_list)] * 2, strict=True))
return raw_list
else:
if '.' in val:
try:
return float(val.strip())
except ValueError:
return val
else:
try:
return int(val.strip())
except ValueError:
return val
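# Examples (hypothetical values): _parseValue('stride', '2') returns 2,
# _parseValue('jitter', '.2') returns 0.2, and
# _parseValue('anchors', '10,13,16,30') returns [(10, 13), (16, 30)].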
class multidict(collections.OrderedDict):
"""
A dict subclass that allows for multiple sections in a config file to share
names.
From: https://stackoverflow.com/a/9888814
"""
_unique = 0 # class variable
def __setitem__(self, key, val):
if isinstance(val, dict):
# This should only happen at the top-most level
self._unique += 1
val['_type'] = key
key = self._unique
elif isinstance(val, str):
val = _parseValue(key, val)
super().__setitem__(key, val)
class DNConfigParser(configparser.RawConfigParser):
def __init__(self, **kwargs):
super().__init__(defaults=None,
dict_type=multidict,
strict=False,
**kwargs)
  def as_list(self) -> List[Dict[str, str]]:
    """
    Converts this parser's contents into a list of dictionaries, one per
    section, each mapping that section's options to their values.
    """
    the_list = []
    for section in self.sections():
      the_list.append(dict(self.items(section)))
    return the_list
  def as_dict(self) -> Dict[str, Dict[str, str]]:
    """
    Converts this parser's contents into a dictionary with sections as keys,
    each pointing to a dict of that section's options as key => value pairs.
    https://stackoverflow.com/a/23944270
    """
    the_dict = {}
    for section in self.sections():
      the_dict[section] = dict(self.items(section))
    return the_dict
def convertConfigFile(configfile):
  """Parse a DarkNet config file (a path or an open file) into a list of
  section dictionaries."""
  parser = DNConfigParser()
  if isinstance(configfile, io.IOBase):
    if hasattr(configfile, 'name'):
      parser.read_file(configfile, source=configfile.name)
    else:
      parser.read_file(configfile)
  else:
    parser.read(configfile)
  return parser.as_list()
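# Minimal usage sketch (file name hypothetical):
#   sections = convertConfigFile('yolov3.cfg')
#   assert sections[0]['_type'] in ('net', 'network')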
#!/usr/bin/env python3
"Number the blocks in a DarkNet config file"
from absl import app, flags
from absl.flags import argparse_flags
import argparse
def _makeParser(parser):
parser.add_argument('filename',
default=None,
help='name of the config file. Defaults to YOLOv3',
nargs='?',
type=argparse.FileType('r'))
_parser = argparse_flags.ArgumentParser()
_makeParser(_parser)
def numberConfig(file):
  """Print the config file with every non-[net] section numbered in order."""
  i = 0
  for line in file:
    if line.startswith('[') and 'net' not in line:
      print(f"{i:4d}|{line}", end='')
      i += 1
    else:
      print(f"    |{line}", end='')
def main(argv, args=None):
if args is None:
args = _parser.parse_args(argv[1:])
filename = args.filename
if filename is None:
from ..file_manager import download
with open(download('yolov3.cfg')) as file:
numberConfig(file)
else:
numberConfig(filename)
if __name__ == '__main__':
app.run(main)
"""
This file contains the code to load parsed weights that are in the DarkNet
format into TensorFlow layers
"""
import itertools
from tensorflow import keras as ks
from yolo.modeling.building_blocks import DarkConv
def split_converter(lst, i, j=None):
  """Split a converter's layer list into two or three contiguous pieces."""
  if j is None:
    return lst.data[:i], lst.data[i:]
  return lst.data[:i], lst.data[i:j], lst.data[j:]
def interleve_weights(block):
  """Merge per-layer weights to fit the DarkNet residual-block style."""
  if len(block) == 0:
    return []
  weights_temp = []
  for layer in block:
    weights = layer.get_weights()
    weights = [tuple(weights[0:3]), tuple(weights[3:])]
    weights_temp.append(weights)
  top, bottom = tuple(zip(*weights_temp))
  weights = list(itertools.chain.from_iterable(top)) + \
      list(itertools.chain.from_iterable(bottom))
  return weights
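# Example (hypothetical weights): for a block of two conv layers A and B
# whose get_weights() return [Aw, Ag, Ab, Am, Av] and [Bw, Bg, Bb, Bm, Bv],
# the result is [Aw, Ag, Ab, Bw, Bg, Bb, Am, Av, Bm, Bv]: all
# (kernel, gamma, beta) triples first, then all (mean, variance) pairs.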
def get_darknet53_tf_format(net, only_weights=True):
  """Convert weights from DarkNet's sequential order to the TensorFlow
  weave for the Darknet53 backbone."""
  combo_blocks = []
  for i in range(2):
    layer = net.pop(0)
    combo_blocks.append(layer)
  # Group the remaining layers into residual blocks: each block is the run
  # of layers up to (and excluding) the next shortcut layer.
  encoder = []
  while len(net) != 0:
    blocks = []
    layer = net.pop(0)
    while layer._type != "shortcut":
      blocks.append(layer)
      layer = net.pop(0)
    encoder.append(blocks)
  new_net = combo_blocks + encoder
  weights = []
  if only_weights:
    for block in new_net:
      if not isinstance(block, list):
        weights.append(block.get_weights())
      else:
        weights.append(interleve_weights(block))
  print("converted/interleaved weights to the TensorFlow format")
  return new_net, weights
def get_tiny_tf_format(encoder):
weights = []
for layer in encoder:
if layer._type != "maxpool":
weights.append(layer.get_weights())
return encoder, weights
def load_weights_dnBackbone(backbone, encoder, mtype="darknet53"):
  # get weights for backbone
  if mtype == "darknet53":
    encoder, weights_encoder = get_darknet53_tf_format(encoder[:])
  elif mtype == "darknet_tiny":
    encoder, weights_encoder = get_tiny_tf_format(encoder[:])
  else:
    raise ValueError(f"unsupported backbone type: {mtype}")
# set backbone weights
print(
f"\nno. layers: {len(backbone.layers)}, no. weights: {len(weights_encoder)}"
)
set_darknet_weights(backbone, weights_encoder)
backbone.trainable = False
print(f"\nsetting backbone.trainable to: {backbone.trainable}\n")
return
def load_weights_dnHead(head, decoder, v4=True):
# get weights for head
decoder, weights_decoder, head_layers, head_weights = get_decoder_weights(
decoder)
# set detection head weights
print(
f"\nno. layers: {len(head.layers)}, no. weights: {len(weights_decoder)}"
)
flat_full = list(flatten_model(head, r_list=False))
flat_main = flat_full[:-3]
flat_head = flat_full[-3:]
  # TODO: this reordering is a hack tied to the v4 head layout
  if v4:
    flat_main.insert(1, flat_main[-1])
print(len(flat_main), len(decoder))
print(len(flat_head), len(head_layers))
set_darknet_weights(head, weights_decoder, flat_model=flat_main)
set_darknet_weights_head(flat_head, head_weights)
head.trainable = False
print(f"\nsetting head.trainable to: {head.trainable}\n")
return
# DEBUGGING
def print_layer_shape(layer):
  try:
    weights = layer.get_weights()
  except AttributeError:
    # `layer` may already be a plain list of weight arrays
    weights = layer
  for item in weights:
    print(item.shape)
  return
def flatten_model(model, r_list=True):
  """Yield the model's layers, recursing into nested Keras models."""
  for layer in model.layers:
    if r_list and isinstance(layer, ks.Model):
      yield from flatten_model(layer, r_list=r_list)
    else:
      yield layer
def set_darknet_weights_head(flat_head, weights_head):
for layer in flat_head:
weights = layer.get_weights()
for weight in weights:
print(weight.shape)
weight_depth = weights[0].shape[-2]
for weight in weights_head:
if weight[0].shape[-2] == weight_depth:
print(
f"loaded weights for layer: head layer with depth {weight_depth} -> name: {layer.name}",
sep=' ',
end="\r")
layer.set_weights(weight)
return
def set_darknet_weights(model, weights_list, flat_model=None):
  if flat_model is None:
    zip_fill = flatten_model(model)
  else:
    zip_fill = flat_model
  for i, (layer, weights) in enumerate(zip(zip_fill, weights_list)):
    print(layer.name, len(weights))
    layer.set_weights(weights)
  return
def split_decoder(lst):
decoder = []
outputs = []
for layer in lst:
if layer._type == 'yolo':
outputs.append(decoder.pop())
outputs.append(layer)
else:
decoder.append(layer)
return decoder, outputs
def get_decoder_weights(decoder):
layers = [[]]
block = []
weights = []
decoder, head = split_decoder(decoder)
# get decoder weights and group them together
for i, layer in enumerate(decoder):
if layer._type == "route" and len(
layer.layers) >= 2 and decoder[i - 1]._type != 'maxpool':
layers.append([])
layers.append(block)
block = []
elif layer._type == "route" and decoder[i - 1]._type != 'maxpool':
layers.append(block)
block = []
elif (layer._type == "route" and decoder[i - 1]._type
== "maxpool") or layer._type == "maxpool":
      # this case is only needed for SPP (spatial pyramid pooling) blocks
continue
elif layer._type == "convolutional":
block.append(layer)
# else:
# # if you upsample
# layers.append([])
  if len(block) > 0:
    layers.append(block)
  # interleave weights for blocked layers
  for layer in layers:
    weights.append(interleve_weights(layer))
  # get weights for the output detection heads
  head_weights = []
  head_layers = []
  for layer in head:
    if layer is not None and layer._type == "convolutional":
      head_weights.append(layer.get_weights())
      head_layers.append(layer)
  return layers, weights, head_layers, head_weights