Commit 08ec7955 authored by anivegesana

Remove dead files

parent 230732b2
runtime:
  distribution_strategy: 'mirrored'
  mixed_precision_dtype: 'float16'
  loss_scale: 'dynamic'
  num_gpus: 2
task:
  annotation_file: Null
  init_checkpoint: Null
  model:
    num_classes: 80
    input_size: [640, 640, 3]
    min_level: 3
    max_level: 7
  losses:
    l2_weight_decay: 0.0001
  train_data:
    input_path: Null
    tfds_name: 'coco/2017'
    tfds_split: 'train'
    tfds_download: True
    is_training: True
    global_batch_size: 16
    dtype: 'float16'
    cycle_length: 5
    decoder:
      type: tfds_decoder
    shuffle_buffer_size: 2
  validation_data:
    input_path: Null
    tfds_name: 'coco/2017'
    tfds_split: 'validation'
    tfds_download: True
    # tfds_skip_decoding_feature: source_id,image,height,width,groundtruth_classes,groundtruth_is_crowd,groundtruth_area,groundtruth_boxes
    is_training: False
    global_batch_size: 16
    dtype: 'float16'
    cycle_length: 10
    decoder:
      type: tfds_decoder
    shuffle_buffer_size: 2
trainer:
  train_steps: 532224
  validation_steps: 1564
  validation_interval: 2000
  steps_per_loop: 200  # 59136
  summary_interval: 200  # 59136
  checkpoint_interval: 10000
  optimizer_config:
    optimizer:
      type: 'sgd'
      sgd:
        momentum: 0.9
    # learning_rate:
    #   type: 'cosine'
    #   cosine:
    #     initial_learning_rate: 0.0021875
    #     decay_steps: 4257792
    #     alpha: 0.01
    # Stepwise version
    learning_rate:
      type: 'stepwise'
      stepwise:
        # boundaries: [26334, 30954]
        boundaries: [421344, 495264]
        # values: [0.28, 0.028, 0.0028]
        values: [0.0175, 0.00175, 0.000175]
    warmup:
      type: 'linear'
      linear:
        warmup_steps: 20480
        warmup_learning_rate: 0.0001634375
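For reference, here is a minimal standalone sketch, not part of this commit, of how the stepwise schedule with linear warmup configured above evaluates at a given step. The actual schedule classes live in the model garden's optimization package; this only mirrors the arithmetic implied by the fields.

def learning_rate_at(step,
                     boundaries=(421344, 495264),
                     values=(0.0175, 0.00175, 0.000175),
                     warmup_steps=20480,
                     warmup_lr=0.0001634375):
  """Piecewise-constant decay preceded by a linear warmup."""
  if step < warmup_steps:
    # Ramp linearly from warmup_lr up to the first stepwise value.
    return warmup_lr + (values[0] - warmup_lr) * step / warmup_steps
  for boundary, value in zip(boundaries, values):
    if step < boundary:
      return value
  return values[-1]

assert learning_rate_at(0) == 0.0001634375   # start of warmup
assert learning_rate_at(100000) == 0.0175    # after warmup, first value
assert learning_rate_at(450000) == 0.00175   # past the first boundary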
@@ -19,4 +19,4 @@ from official.modeling.activations.sigmoid import hard_sigmoid
from official.modeling.activations.swish import hard_swish
from official.modeling.activations.swish import identity
from official.modeling.activations.swish import simple_swish
from official.modeling.activations.swish import simple_swish
\ No newline at end of file
@@ -15,9 +15,7 @@
"""All necessary imports for registration."""
# pylint: disable=unused-import
from official.nlp import tasks as nlp_task
from official.utils.testing import mock_task
from official.vision import beta
from official.common import registry_imports
from official.vision.beta.projects import yolo
from official.vision.beta.projects.yolo.modeling.backbones import darknet
...
@@ -20,7 +20,7 @@ from official.core import input_reader
from official.core import task_factory
from official.modeling import tf_utils
from official.vision.beta.projects.yolo.configs import darknet_classification as exp_cfg
from official.vision.beta.projects.yolo.dataloaders import classification_input as cli
from official.vision.beta.projects.yolo.dataloaders import classification_tfds_decoder as cli
from official.vision.beta.dataloaders import classification_input
from official.vision.beta.modeling import factory
from official.vision.beta.tasks import image_classification
...
@@ -35,8 +35,6 @@ FLAGS = flags.FLAGS
def main(_):
  gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
  params = train_utils.parse_configuration(FLAGS)
  import pprint
  pprint.pprint(params.as_dict())
  model_dir = FLAGS.model_dir
  if 'train' in FLAGS.mode:
    # Pure eval modes do not output yaml files. Otherwise continuous eval job
...
@@ -81,6 +81,7 @@ def make_distributed_dataset(strategy, dataset_or_fn, *args, **kwargs):
    if "input_context" in arg_names:
      kwargs["input_context"] = input_context
    return dataset_or_fn(*args, **kwargs)
  return strategy.distribute_datasets_from_function(dataset_fn)
...
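A hedged sketch of what the patched make_distributed_dataset above enables. The builder name and its arguments are hypothetical stand-ins, but tf.distribute.Strategy.distribute_datasets_from_function and tf.distribute.InputContext are the real TF 2.x APIs the hunk relies on.

import tensorflow as tf

def build_my_dataset(batch_size, input_context=None):
  # Hypothetical dataset builder; because its signature names
  # `input_context`, make_distributed_dataset would inject one per replica.
  ds = tf.data.Dataset.range(1000)
  if input_context is not None:
    # Shard across input pipelines when the strategy provides a context.
    ds = ds.shard(input_context.num_input_pipelines,
                  input_context.input_pipeline_id)
  return ds.batch(batch_size)

strategy = tf.distribute.MirroredStrategy()
dist_ds = strategy.distribute_datasets_from_function(
    lambda ctx: build_my_dataset(batch_size=8, input_context=ctx))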
runtime:
  distribution_strategy: 'mirrored'
  mixed_precision_dtype: 'float32'
  loss_scale: 'dynamic'
  num_gpus: 1
task:
  init_checkpoint: Null
  model:
    num_classes: 80
    input_size: [640, 640, 3]
    min_level: 3
    max_level: 7
  losses:
    l2_weight_decay: 0.0001
  train_data:
    input_path: Null
    tfds_name: 'coco/2017'
    tfds_split: 'train'
    tfds_download: True
    is_training: True
    global_batch_size: 2
    dtype: 'float16'
    cycle_length: 5
    decoder:
      type: tfds_decoder
    shuffle_buffer_size: 2
  validation_data:
    input_path: Null
    tfds_name: 'coco/2017'
    tfds_split: 'validation'
    tfds_download: True
    # tfds_skip_decoding_feature: source_id,image,height,width,groundtruth_classes,groundtruth_is_crowd,groundtruth_area,groundtruth_boxes
    is_training: False
    global_batch_size: 2
    dtype: 'float16'
    cycle_length: 10
    decoder:
      type: tfds_decoder
    shuffle_buffer_size: 2
trainer:
  train_steps: 4257792
  validation_steps: 2500
  validation_interval: 5000
  steps_per_loop: 100  # 59136
  summary_interval: 100  # 59136
  checkpoint_interval: 59136
  optimizer_config:
    optimizer:
      type: 'sgd'
      sgd:
        momentum: 0.9
    # learning_rate:
    #   type: 'cosine'
    #   cosine:
    #     initial_learning_rate: 0.0021875
    #     decay_steps: 4257792
    #     alpha: 0.01
    # Stepwise version
    learning_rate:
      type: 'stepwise'
      stepwise:
        # boundaries: [26334, 30954]
        boundaries: [3370752, 3962112]
        # values: [0.28, 0.028, 0.0028]
        values: [0.0021875, 0.00021875, 0.000021875]
    warmup:
      type: 'linear'
      linear:
        warmup_steps: 64000
        warmup_learning_rate: 0.0000523
import tensorflow_datasets as tfds
import tensorflow as tf

from official.vision.beta.dataloaders import decoder

import matplotlib.pyplot as plt
import cv2


class TfdsExampleDecoder(decoder.Decoder):
  """TensorFlow Datasets example decoder."""

  def __init__(self, include_mask=False, regenerate_source_id=False):
    self._include_mask = include_mask
    self._regenerate_source_id = regenerate_source_id

  def decode(self, serialized_example):
    """Decode a TFDS example.

    Args:
      serialized_example: a single example dictionary as produced by TFDS.
        Despite the name, this decoder consumes already-decoded TFDS
        feature dictionaries rather than serialized tf.Example strings.

    Returns:
      decoded_tensors: a dictionary of tensors with the following fields:
        - source_id: a string scalar tensor.
        - image: a uint8 tensor of shape [None, None, 3].
        - height: an integer scalar tensor.
        - width: an integer scalar tensor.
        - groundtruth_classes: an int64 tensor of shape [None].
        - groundtruth_is_crowd: a bool tensor of shape [None].
        - groundtruth_area: a float32 tensor of shape [None].
        - groundtruth_boxes: a float32 tensor of shape [None, 4].
        - groundtruth_instance_masks: a float32 tensor of shape
            [None, None, None].
        - groundtruth_instance_masks_png: a string tensor of shape [None].
    """
    decoded_tensors = {
        'source_id': serialized_example['image/id'],
        'image': serialized_example['image'],
        'height': tf.shape(serialized_example['image'])[0],
        'width': tf.shape(serialized_example['image'])[1],
        'groundtruth_classes': serialized_example['objects']['label'],
        'groundtruth_is_crowd': serialized_example['objects']['is_crowd'],
        'groundtruth_area': serialized_example['objects']['area'],
        'groundtruth_boxes': serialized_example['objects']['bbox'],
    }
    return decoded_tensors
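A quick usage sketch, not part of the commit: the decoder consumes feature dictionaries straight from tfds.load (assuming the COCO dataset has been downloaded), so it can be exercised eagerly like this.

ds = tfds.load('coco/2017', split='validation')
dec = TfdsExampleDecoder()
for example in ds.take(1):
  decoded = dec.decode(example)
  print(decoded['height'].numpy(), decoded['width'].numpy(),
        decoded['groundtruth_boxes'].shape)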
runtime:
  all_reduce_alg: null
  batchnorm_spatial_persistent: false
  dataset_num_private_threads: null
  default_shard_dim: -1
  distribution_strategy: mirrored
  enable_xla: false
  gpu_thread_mode: null
  loss_scale: dynamic
  mixed_precision_dtype: float16
  num_cores_per_replica: 1
  num_gpus: 2
  num_packs: 1
  per_gpu_thread_count: 0
  run_eagerly: false
  task_index: -1
  tpu: null
  worker_hosts: null
task:
  gradient_clip_norm: 0.0
  init_checkpoint: ''
  logging_dir: null
  losses:
    l2_weight_decay: 0.0005
    label_smoothing: 0.0
    one_hot: true
  model:
    add_head_batch_norm: false
    backbone:
      darknet:
        model_id: cspdarknet53
      type: darknet
    dropout_rate: 0.0
    input_size: [256, 256, 3]
    norm_activation:
      activation: mish
      norm_epsilon: 0.001
      norm_momentum: 0.99
      use_sync_bn: false
    num_classes: 1001
  train_data:
    block_length: 1
    cache: false
    cycle_length: 10
    deterministic: null
    drop_remainder: true
    dtype: float16
    enable_tf_data_service: false
    global_batch_size: 16
    input_path: ''
    is_training: true
    sharding: true
    shuffle_buffer_size: 100
    tf_data_service_address: null
    tf_data_service_job_name: null
    tfds_as_supervised: false
    tfds_data_dir: ~/tensorflow_datasets/
    tfds_download: true
    tfds_name: imagenet2012
    tfds_skip_decoding_feature: ''
    tfds_split: train
  validation_data:
    block_length: 1
    cache: false
    cycle_length: 10
    deterministic: null
    drop_remainder: false
    dtype: float16
    enable_tf_data_service: false
    global_batch_size: 16
    input_path: ''
    is_training: true
    sharding: true
    shuffle_buffer_size: 100
    tf_data_service_address: null
    tf_data_service_job_name: null
    tfds_as_supervised: false
    tfds_data_dir: ~/tensorflow_datasets/
    tfds_download: true
    tfds_name: imagenet2012
    tfds_skip_decoding_feature: ''
    tfds_split: validation
trainer:
  allow_tpu_summary: false
  best_checkpoint_eval_metric: ''
  best_checkpoint_export_subdir: ''
  best_checkpoint_metric_comp: higher
  checkpoint_interval: 10000
  continuous_eval_timeout: 3600
  eval_tf_function: true
  max_to_keep: 5
  optimizer_config:
    ema: null
    learning_rate:
      polynomial:
        cycle: false
        decay_steps: 9592000
        end_learning_rate: 1.25e-05
        initial_learning_rate: 0.0125
        name: PolynomialDecay
        power: 4.0
      type: polynomial
    optimizer:
      sgd:
        clipnorm: null
        clipvalue: null
        decay: 0.0
        momentum: 0.9
        name: SGD
        nesterov: false
      type: sgd
    warmup:
      linear:
        name: linear
        warmup_learning_rate: 0
        warmup_steps: 8000
      type: linear
  steps_per_loop: 10000
  summary_interval: 10000
  train_steps: 9600000
  train_tf_function: true
  train_tf_while_loop: true
  validation_interval: 10000
  validation_steps: 3200
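As a sanity check, the dumped polynomial schedule maps directly onto the stock Keras schedule. A sketch, assuming the fields correspond one-to-one to tf.keras.optimizers.schedules.PolynomialDecay:

import tensorflow as tf

schedule = tf.keras.optimizers.schedules.PolynomialDecay(
    initial_learning_rate=0.0125,
    decay_steps=9592000,
    end_learning_rate=1.25e-05,
    power=4.0,
    cycle=False)

print(schedule(0).numpy())        # 0.0125 at step 0
print(schedule(9592000).numpy())  # 1.25e-05 by the final decay step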
import collections
import collections.abc
import io
from typing import Union, Type, TypeVar

from ..file_manager import PathABC

T = TypeVar('T', bound='DarkNetConverter')


class _DarkNetSectionList(collections.abc.MutableSequence):
  __slots__ = ['data']

  def __init__(self, initlist=None):
    self.data = []
    if initlist is not None:
      self.data = list(initlist)

  @property
  def net(self):
    return self.data[0]

  # Override Python list operations so that non-negative indices skip the
  # [net] entry stored at data[0], while negative indices behave as usual.
  def __len__(self):
    return max(0, len(self.data) - 1)

  def __getitem__(self, i):
    if isinstance(i, slice):
      # Slices index the underlying data directly.
      return self.__class__(self.data[i])
    if i >= 0:
      i += 1
    return self.data[i]

  def __setitem__(self, i, item):
    if i >= 0:
      i += 1
    self.data[i] = item

  def __delitem__(self, i):
    if i >= 0:
      i += 1
    del self.data[i]

  def insert(self, i, item):
    if i >= 0:
      i += 1
    self.data.insert(i, item)
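A hypothetical illustration of the shifted indexing, with strings standing in for parsed sections:

sections = _DarkNetSectionList(['net', 'conv0', 'conv1'])
assert sections.net == 'net'
assert len(sections) == 2       # the [net] entry is not counted
assert sections[0] == 'conv0'   # absolute indices start after [net]
assert sections[-1] == 'conv1'  # negative indices behave as usual...
assert sections[-3] == 'net'    # ...so [net] is only reachable relatively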
class DarkNetConverter(_DarkNetSectionList):
  """
  A special list-like object that stores the layers of a model defined in
  the DarkNet format. Note that indexing layers in a DarkNet model is
  unintuitive and does not follow the usual Python list conventions.

  In DarkNet, a [net] section sits at the top of every model definition and
  defines the input and training parameters for the entire model. As such,
  it is not a layer and cannot be referenced directly. For convenience, we
  allow relative (negative) references to [net] but disallow absolute ones.
  Like the DarkNet implementation, our implementation numbers the first
  layer after [net] as 0, so non-negative indices always skip [net].

  To use conventional list operations on a DarkNetConverter object, use the
  data attribute provided by this class.
  """

  @classmethod
  def read(
      clz: Type[T],
      config_file: Union[PathABC, io.TextIOBase],
      weights_file: Union[PathABC, io.RawIOBase,
                          io.BufferedIOBase] = None) -> T:
    """
    Parse the config and weights files and read the DarkNet model's layers
    (encoder, decoder, and output heads) along with their weights.

    Args:
      config_file: str, path to the yolo config file from DarkNet
      weights_file: str, path to the yolo weights file from DarkNet

    Returns:
      A DarkNetConverter object.
    """
    from .read_weights import read_weights
    full_net = clz()
    read_weights(full_net, config_file, weights_file)
    return full_net
  def to_tf(self, thresh=0.45, class_thresh=0.45, max_boxes=200,
            use_mixed=True):
    import tensorflow as tf

    tensors = _DarkNetSectionList()
    layers = _DarkNetSectionList()
    yolo_tensors = []
    for i, cfg in enumerate(self.data):
      tensor = cfg.to_tf(tensors)
      # Weighted layers return a (tensor, layer) pair so the weights can be
      # copied over once the model is built.
      if isinstance(tensor, tuple):
        tensor, layer = tensor
      else:
        layer = None
      assert tensor.shape[1:] == cfg.shape, (
          f"{cfg} shape inconsistent\n"
          f"\tExpected: {cfg.shape}\n\tGot: {tensor.shape[1:]}")
      if cfg._type == 'yolo':
        yolo_tensors.append((i, cfg, tensor))
      tensors.append(tensor)
      layers.append(layer)

    model = tf.keras.Model(
        inputs=tensors.net,
        outputs=self._process_yolo_layer(yolo_tensors,
                                         thresh=thresh,
                                         class_thresh=class_thresh,
                                         max_boxes=max_boxes,
                                         use_mixed=use_mixed))
    model.build(self.net.shape)
    for cfg, layer in zip(self, layers):
      if layer is not None:
        layer.set_weights(cfg.get_weights())
    return model
  def _process_yolo_layer(self, yolo_tensors, thresh=0.45, class_thresh=0.45,
                          max_boxes=200, use_mixed=True):
    import tensorflow as tf
    from yolo.modeling.building_blocks import YoloLayer

    if use_mixed:
      from tensorflow.keras.mixed_precision import experimental as mixed_precision
      # A mixed-precision policy gives better performance than strict float32.
      policy = mixed_precision.Policy('mixed_float16')
      mixed_precision.set_policy(policy)
      dtype = policy.compute_dtype
    else:
      dtype = tf.float32

    outs = collections.OrderedDict()
    masks = {}
    anchors = None
    scale_x_y = None
    path_scales = {}
    for i, yolo_cfg, yolo_tensor in yolo_tensors:
      masks[yolo_tensor.name] = yolo_cfg.mask
      if anchors is None:
        anchors = yolo_cfg.anchors
      elif anchors != yolo_cfg.anchors:
        raise ValueError('Anchors inconsistent in [yolo] layers')
      if scale_x_y is None:
        scale_x_y = yolo_cfg.scale_x_y
      elif scale_x_y != yolo_cfg.scale_x_y:
        raise ValueError('Scale inconsistent in [yolo] layers')
      outs[yolo_tensor.name] = yolo_tensor
      path_scales[yolo_tensor.name] = self.data[i - 1].c >> 5
    yolo_layer = YoloLayer(
        masks=masks,
        anchors=anchors,
        thresh=thresh,
        cls_thresh=class_thresh,
        max_boxes=max_boxes,
        dtype=dtype,
        #scale_boxes=self.net.w,
        scale_xy=scale_x_y,
        path_scale=path_scales)
    return yolo_layer(outs)
#!/usr/bin/env python3
"Convert a DarkNet config file and weights into a TensorFlow model"

from absl import flags as _flags
from absl.flags import argparse_flags as _argparse_flags
import argparse as _argparse

_flags.DEFINE_boolean('weights_only', False,
                      'Save only the weights and not the entire model.')

from . import DarkNetConverter


def _makeParser(parser):
  parser.add_argument('cfg',
                      default=None,
                      help='name of the config file. Defaults to YOLOv3',
                      type=_argparse.FileType('r'),
                      nargs='?')
  parser.add_argument('weights',
                      default=None,
                      help='name of the weights file. Defaults to YOLOv3',
                      type=_argparse.FileType('rb'),
                      nargs='?')
  parser.add_argument(
      'output', help='name of the location to save the generated model')


def main(argv, args=None):
  from ..file_manager import download
  import os
  if args is None:
    args = _parser.parse_args(argv[1:])
  cfg = args.cfg
  weights = args.weights
  output = args.output
  if cfg is None:
    cfg = download('yolov3.cfg')
  if weights is None:
    weights = download('yolov3.weights')
  model = DarkNetConverter.read(cfg, weights).to_tf()
  if output != os.devnull:
    if _flags.FLAGS.weights_only:
      model.save_weights(output)
    else:
      model.save(output)


_parser = _argparse_flags.ArgumentParser()
_makeParser(_parser)
from absl import app
import sys

from . import main, _parser

if __name__ == '__main__':
  # I dislike Abseil's current help menu. I like the default Python one
  # better.
  if '-h' in sys.argv or '--help' in sys.argv:
    _parser.parse_args(sys.argv[1:])
    exit()
  app.run(main)
"Convert a DarkNet config file into a Python literal file in a list of dictionaries format"
import collections
import configparser
import io
import sys
from typing import Dict, List
if sys.version_info < (3, 10):
# shim for Python 3.9 and older
from more_itertools import zip_equal
def zip(*iterables, strict=False):
if strict:
return zip_equal(*iterables)
else:
return __builtins__.zip(*iterables)
def _parseValue(key, val):
"""
Parse non-string literals found in darknet config files
"""
if ',' in val:
vals = val.split(',')
raw_list = tuple(_parseValue(key, v) for v in vals)
if key == 'anchors':
# Group the anchors list into pairs
# https://docs.python.org/3.10/library/functions.html#zip
raw_list = list(zip(*[iter(raw_list)] * 2, strict=True))
return raw_list
else:
if '.' in val:
try:
return float(val.strip())
except ValueError:
return val
else:
try:
return int(val.strip())
except ValueError:
return val
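A few hypothetical values showing the parsing rules above:

assert _parseValue('width', '608') == 608
assert _parseValue('momentum', '0.9') == 0.9
assert _parseValue('layers', '-1,-7') == (-1, -7)
# 'anchors' values are additionally grouped into (w, h) pairs:
assert _parseValue('anchors', '10,13, 16,30') == [(10, 13), (16, 30)]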
class multidict(collections.OrderedDict):
  """
  A dict subclass that allows multiple sections in a config file to share a
  name.

  From: https://stackoverflow.com/a/9888814
  """
  _unique = 0  # class variable

  def __setitem__(self, key, val):
    if isinstance(val, dict):
      # This should only happen at the top-most level.
      self._unique += 1
      val['_type'] = key
      key = self._unique
    elif isinstance(val, str):
      val = _parseValue(key, val)
    super().__setitem__(key, val)


class DNConfigParser(configparser.RawConfigParser):

  def __init__(self, **kwargs):
    super().__init__(defaults=None, dict_type=multidict, strict=False,
                     **kwargs)

  def as_list(self) -> List[Dict[str, str]]:
    """
    Converts a ConfigParser object into a list of dictionaries, one per
    section, with each section's options as key => value pairs.
    """
    the_list = []
    for section in self.sections():
      the_list.append(dict(self.items(section)))
    return the_list

  def as_dict(self) -> Dict[str, Dict[str, str]]:
    """
    Converts a ConfigParser object into a dictionary.

    The resulting dictionary has sections as keys which point to a dict of
    the section's options as key => value pairs.

    https://stackoverflow.com/a/23944270
    """
    the_dict = {}
    for section in self.sections():
      the_dict[section] = dict(self.items(section))
    return the_dict


def convertConfigFile(configfile):
  parser = DNConfigParser()
  if isinstance(configfile, io.IOBase):
    if hasattr(configfile, 'name'):
      print(configfile.name)
      parser.read_file(configfile, source=configfile.name)
    else:
      parser.read_file(configfile)
  else:
    parser.read(configfile)
  return parser.as_list()
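A small end-to-end sketch of the parser. The sample config is made up, but the duplicate [convolutional] sections show why plain configparser, which forbids repeated section names, is not enough here:

sample = io.StringIO("""
[net]
width=608

[convolutional]
filters=32

[convolutional]
filters=64
""")
print(convertConfigFile(sample))
# -> [{'_type': 'net', 'width': 608},
#     {'_type': 'convolutional', 'filters': 32},
#     {'_type': 'convolutional', 'filters': 64}]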
#!/usr/bin/env python3
"Number the blocks in a DarkNet config file"

from absl import app, flags
from absl.flags import argparse_flags
import argparse


def _makeParser(parser):
  parser.add_argument('filename',
                      default=None,
                      help='name of the config file. Defaults to YOLOv3',
                      nargs='?',
                      type=argparse.FileType('r'))


_parser = argparse_flags.ArgumentParser()
_makeParser(_parser)


def numberConfig(file):
  i = 0
  for line in file:
    if line.startswith('[') and 'net' not in line:
      print(f"{i:4d}|{line}", end='')
      i += 1
    else:
      print(f"    |{line}", end='')


def main(argv, args=None):
  if args is None:
    args = _parser.parse_args(argv[1:])
  filename = args.filename
  if filename is None:
    from ..file_manager import download
    with open(download('yolov3.cfg')) as file:
      numberConfig(file)
  else:
    numberConfig(filename)


if __name__ == '__main__':
  app.run(main)
"""
This file contains the code to load parsed weights that are in the DarkNet
format into TensorFlow layers
"""
import itertools
from tensorflow import keras as ks
from yolo.modeling.building_blocks import DarkConv
def split_converter(lst, i, j=None):
if j is None:
return lst.data[:i], lst.data[i:]
return lst.data[:i], lst.data[i:j], lst.data[j:]
def interleve_weights(block):
"""merge weights to fit the DarkResnet block style"""
if len(block) == 0:
return []
weights_temp = []
for layer in block:
weights = layer.get_weights()
weights = [tuple(weights[0:3]), tuple(weights[3:])]
weights_temp.append(weights)
top, bottom = tuple(zip(*weights_temp))
weights = list(itertools.chain.from_iterable(top)) + \
list(itertools.chain.from_iterable(bottom))
return weights
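To see what the interleave does, here is a toy illustration with fake layers (pure assumption, just to show the ordering): the first three weight tensors of every layer in the block come first, followed by the remaining tensors of every layer.

class _FakeLayer:
  """Stand-in layer whose six 'weights' are just labeled strings."""

  def __init__(self, tag):
    self._w = [f'{tag}{i}' for i in range(6)]

  def get_weights(self):
    return self._w

print(interleve_weights([_FakeLayer('a'), _FakeLayer('b')]))
# -> ['a0', 'a1', 'a2', 'b0', 'b1', 'b2', 'a3', 'a4', 'a5', 'b3', 'b4', 'b5']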
def get_darknet53_tf_format(net, only_weights=True):
  """Convert weights from DarkNet sequential to the TensorFlow weave for the
  Darknet53 backbone."""
  combo_blocks = []
  for i in range(2):
    layer = net.pop(0)
    combo_blocks.append(layer)
  # Group the remaining layers into residual blocks; each block ends at a
  # [shortcut] section.
  encoder = []
  while len(net) != 0:
    blocks = []
    layer = net.pop(0)
    while layer._type != "shortcut":
      blocks.append(layer)
      layer = net.pop(0)
    encoder.append(blocks)
  new_net = combo_blocks + encoder
  weights = []
  if only_weights:
    for block in new_net:
      if type(block) != list:
        weights.append(block.get_weights())
      else:
        weights.append(interleve_weights(block))
  print("converted/interleaved weights for tensorflow format")
  return new_net, weights


def get_tiny_tf_format(encoder):
  weights = []
  for layer in encoder:
    if layer._type != "maxpool":
      weights.append(layer.get_weights())
  return encoder, weights


def load_weights_dnBackbone(backbone, encoder, mtype="darknet53"):
  # get weights for backbone
  if mtype == "darknet53":
    encoder, weights_encoder = get_darknet53_tf_format(encoder[:])
  elif mtype == "darknet_tiny":
    encoder, weights_encoder = get_tiny_tf_format(encoder[:])

  # set backbone weights
  print(
      f"\nno. layers: {len(backbone.layers)}, no. weights: {len(weights_encoder)}"
  )
  set_darknet_weights(backbone, weights_encoder)

  backbone.trainable = False
  print(f"\nsetting backbone.trainable to: {backbone.trainable}\n")
  return


def load_weights_dnHead(head, decoder, v4=True):
  # get weights for head
  decoder, weights_decoder, head_layers, head_weights = get_decoder_weights(
      decoder)

  # set detection head weights
  print(
      f"\nno. layers: {len(head.layers)}, no. weights: {len(weights_decoder)}"
  )
  flat_full = list(flatten_model(head, r_list=False))
  flat_main = flat_full[:-3]
  flat_head = flat_full[-3:]

  # not the right way to do it
  if v4:
    flat_main.insert(1, flat_main[-1])

  print(len(flat_main), len(decoder))
  print(len(flat_head), len(head_layers))
  set_darknet_weights(head, weights_decoder, flat_model=flat_main)
  set_darknet_weights_head(flat_head, head_weights)

  head.trainable = False
  print(f"\nsetting head.trainable to: {head.trainable}\n")
  return


# DEBUGGING
def print_layer_shape(layer):
  try:
    weights = layer.get_weights()
  except AttributeError:
    # `layer` may already be a list of weight arrays.
    weights = layer
  for item in weights:
    print(item.shape)
  return


def flatten_model(model, r_list=True):
  for layer in model.layers:
    if r_list and isinstance(layer, ks.Model):
      # Recurse into nested models so their sublayers are visited too.
      yield from flatten_model(layer, r_list=r_list)
    else:
      yield layer


def set_darknet_weights_head(flat_head, weights_head):
  for layer in flat_head:
    weights = layer.get_weights()
    for weight in weights:
      print(weight.shape)
    weight_depth = weights[0].shape[-2]
    for weight in weights_head:
      if weight[0].shape[-2] == weight_depth:
        print(
            f"loaded weights for layer: head layer with depth {weight_depth} -> name: {layer.name}",
            end="\r")
        layer.set_weights(weight)
  return


def set_darknet_weights(model, weights_list, flat_model=None):
  if flat_model is None:
    zip_fill = flatten_model(model)
  else:
    zip_fill = flat_model
  for i, (layer, weights) in enumerate(zip(zip_fill, weights_list)):
    print(layer.name, len(weights))
    #layer.set_weights(weights)
  return


def split_decoder(lst):
  decoder = []
  outputs = []
  for layer in lst:
    if layer._type == 'yolo':
      outputs.append(decoder.pop())
      outputs.append(layer)
    else:
      decoder.append(layer)
  return decoder, outputs


def get_decoder_weights(decoder):
  layers = [[]]
  block = []
  weights = []
  decoder, head = split_decoder(decoder)

  # get decoder weights and group them together
  for i, layer in enumerate(decoder):
    if layer._type == "route" and len(
        layer.layers) >= 2 and decoder[i - 1]._type != 'maxpool':
      layers.append([])
      layers.append(block)
      block = []
    elif layer._type == "route" and decoder[i - 1]._type != 'maxpool':
      layers.append(block)
      block = []
    elif (layer._type == "route" and decoder[i - 1]._type
          == "maxpool") or layer._type == "maxpool":
      # made only for spp
      continue
    elif layer._type == "convolutional":
      block.append(layer)
    # else:
    #   # if you upsample
    #   layers.append([])

  if len(block) > 0:
    layers.append(block)

  # interleave weights for blocked layers
  for layer in layers:
    weights.append(interleve_weights(layer))

  # get weights for output detection heads
  head_weights = []
  head_layers = []
  for layer in head:
    if layer is not None and layer._type == "convolutional":
      head_weights.append(layer.get_weights())
      head_layers.append(layer)
  return layers, weights, head_layers, head_weights