Commit e0ada7c6 authored by anivegesana's avatar anivegesana

Remove dead files again

parent 08ec7955
...@@ -16,7 +16,6 @@
from official.modeling.activations.gelu import gelu
from official.modeling.activations.relu import relu6
from official.modeling.activations.sigmoid import hard_sigmoid
from official.modeling.activations.swish import hard_swish
from official.modeling.activations.swish import identity
from official.modeling.activations.swish import simple_swish
\ No newline at end of file
...@@ -53,7 +53,6 @@ class ImageClassificationTask(base_task.Task):
input_size = self.task_config.model.input_size
decoder = classification_input.Decoder()
parser = classification_input.Parser(
output_size=input_size[:2],
num_classes=num_classes,
...
...@@ -82,7 +82,6 @@ def make_distributed_dataset(strategy, dataset_or_fn, *args, **kwargs):
kwargs["input_context"] = input_context
return dataset_or_fn(*args, **kwargs)
return strategy.distribute_datasets_from_function(dataset_fn)
...
import collections
import collections.abc
import io
from ..file_manager import PathABC
from typing import Union, Type, TypeVar
T = TypeVar('T', bound='DarkNetModel')
class _DarkNetSectionList(collections.abc.MutableSequence):
__slots__ = ['data']
def __init__(self, initlist=None):
self.data = []
if initlist is not None:
self.data = list(initlist)
@property
def net(self):
return self.data[0]
# Overriding Python list operations
def __len__(self):
return max(0, len(self.data) - 1)
def __getitem__(self, i):
    # slices must be handled before the integer offset shift; comparing a
    # slice with 0 would raise a TypeError
    if isinstance(i, slice):
        return self.__class__(self.data[i])
    if i >= 0:
        i += 1
    return self.data[i]
def __setitem__(self, i, item):
if i >= 0:
i += 1
self.data[i] = item
def __delitem__(self, i):
if i >= 0:
i += 1
del self.data[i]
def insert(self, i, item):
if i >= 0:
i += 1
self.data.insert(i, item)
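# Illustrative sketch (not part of the original module): the offset indexing
# above means index 0 is the first section *after* [net], while negative
# (relative) indices still reach [net] itself.
def _example_section_list():
    sections = _DarkNetSectionList(['net', 'conv0', 'conv1'])
    assert len(sections) == 2      # [net] is not counted
    assert sections[0] == 'conv0'  # absolute indices skip [net]
    assert sections[-3] == 'net'   # relative (negative) access reaches [net]
    assert sections.net == 'net'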
class DarkNetConverter(_DarkNetSectionList):
"""
This is a special list-like object to handle the storage of layers in a
model that is defined in the DarkNet format. Note that indexing layers in a
DarkNet model can be unintuitive and doesn't follow the same conventions
as a Python list.
In DarkNet, a [net] section is at the top of every model definition. This
section defines the input and training parameters for the entire model.
As such, it is not a layer and cannot be referenced directly. For our
convenience, we allow relative (negative) references to [net] but disallow
absolute ones. Like the DarkNet implementation, our implementation numbers
the first layer (after [net]) with a 0 and counts upward from there.
To use conventional list operations on the DarkNetConverter object, use the
data property provided by this class.
"""
@classmethod
def read(
clz: Type[T],
config_file: Union[PathABC, io.TextIOBase],
weights_file: Union[PathABC, io.RawIOBase,
io.BufferedIOBase] = None) -> T:
"""
Parse the config and weights files and construct the DarkNet model's
sections, including its encoder, decoder, and output layers.
Args:
config_file: str, path to yolo config file from Darknet
weights_file: str, path to yolo weights file from Darknet
Returns:
a DarkNetConverter object
"""
from .read_weights import read_weights
full_net = clz()
read_weights(full_net, config_file, weights_file)
return full_net
def to_tf(self,
thresh=0.45,
class_thresh=0.45,
max_boxes=200,
use_mixed=True):
import tensorflow as tf
tensors = _DarkNetSectionList()
layers = _DarkNetSectionList()
yolo_tensors = []
for i, cfg in enumerate(self.data):
tensor = cfg.to_tf(tensors)
# Handle weighted layers
if type(tensor) is tuple:
tensor, layer = tensor
else:
layer = None
assert tensor.shape[1:] == cfg.shape, str(
cfg
) + f" shape inconsistent\n\tExpected: {cfg.shape}\n\tGot: {tensor.shape[1:]}"
if cfg._type == 'yolo':
yolo_tensors.append((i, cfg, tensor))
tensors.append(tensor)
layers.append(layer)
model = tf.keras.Model(inputs=tensors.net,
outputs=self._process_yolo_layer(
yolo_tensors,
thresh=thresh,
class_thresh=class_thresh,
max_boxes=max_boxes,
use_mixed=use_mixed))
model.build(self.net.shape)
for cfg, layer in zip(self, layers):
if layer is not None:
layer.set_weights(cfg.get_weights())
return model
def _process_yolo_layer(self,
yolo_tensors,
thresh=0.45,
class_thresh=0.45,
max_boxes=200,
use_mixed=True):
import tensorflow as tf
from yolo.modeling.building_blocks import YoloLayer
if use_mixed:
from tensorflow.keras.mixed_precision import experimental as mixed_precision
# using a mixed-precision policy gives better performance than strict float32
policy = mixed_precision.Policy('mixed_float16')
mixed_precision.set_policy(policy)
dtype = policy.compute_dtype
else:
dtype = tf.float32
outs = collections.OrderedDict()
masks = {}
anchors = None
scale_x_y = None
path_scales = {}
for i, yolo_cfg, yolo_tensor in yolo_tensors:
masks[yolo_tensor.name] = yolo_cfg.mask
if anchors is None:
anchors = yolo_cfg.anchors
elif anchors != yolo_cfg.anchors:
raise ValueError('Anchors inconsistent in [yolo] layers')
if scale_x_y is None:
scale_x_y = yolo_cfg.scale_x_y
elif scale_x_y != yolo_cfg.scale_x_y:
raise ValueError('Scale inconsistent in [yolo] layers')
outs[yolo_tensor.name] = yolo_tensor
path_scales[yolo_tensor.name] = self.data[i - 1].c >> 5
yolo_layer = YoloLayer(
masks=masks,
anchors=anchors,
thresh=thresh,
cls_thresh=class_thresh,
max_boxes=max_boxes,
dtype=dtype,
#scale_boxes=self.net.w,
scale_xy=scale_x_y,
path_scale=path_scales)
return yolo_layer(outs)
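# A minimal usage sketch (hypothetical cache paths; assumes the files were
# fetched beforehand, e.g. with file_manager.download):
def _example_darknet_to_tf():
    converter = DarkNetConverter.read('cache/cfg/yolov3.cfg',
                                      'cache/weights/yolov3.weights')
    model = converter.to_tf(use_mixed=False)
    model.summary()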
#!/usr/bin/env python3
"Convert a DarkNet config file and weights into a TensorFlow model"
from absl import flags as _flags
from absl.flags import argparse_flags as _argparse_flags
import argparse as _argparse
_flags.DEFINE_boolean('weights_only', False,
'Save only the weights and not the entire model.')
from . import DarkNetConverter
def _makeParser(parser):
parser.add_argument('cfg',
default=None,
help='name of the config file. Defaults to YOLOv3',
type=_argparse.FileType('r'),
nargs='?')
parser.add_argument('weights',
default=None,
help='name of the weights file. Defaults to YOLOv3',
type=_argparse.FileType('rb'),
nargs='?')
parser.add_argument(
'output', help='name of the location to save the generated model')
def main(argv, args=None):
from ..file_manager import download
import os
if args is None:
args = _parser.parse_args(argv[1:])
cfg = args.cfg
weights = args.weights
output = args.output
if cfg is None:
cfg = download('yolov3.cfg')
if weights is None:
weights = download('yolov3.weights')
model = DarkNetConverter.read(cfg, weights).to_tf()
if output != os.devnull:
if _flags.FLAGS.weights_only:
model.save_weights(output)
else:
model.save(output)
_parser = _argparse_flags.ArgumentParser()
_makeParser(_parser)
from absl import app
import sys
from . import main, _parser
if __name__ == '__main__':
# I dislike Abseil's current help menu. I like the default Python one
# better
if '-h' in sys.argv or '--help' in sys.argv:
_parser.parse_args(sys.argv[1:])
exit()
app.run(main)
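# Example invocations (the module path is an assumption based on the relative
# imports above; the output can be a SavedModel directory or an .h5 file):
#   python -m yolo.utils._darknet2tf yolov3.cfg yolov3.weights saved_model
#   python -m yolo.utils._darknet2tf --weights_only yolov3.cfg yolov3.weights out.h5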
"Convert a DarkNet config file into a Python literal file in a list of dictionaries format"
import collections
import configparser
import io
import sys
from typing import Dict, List
if sys.version_info < (3, 10):
    # shim for Python 3.9 and older: emulate PEP 618's zip(..., strict=True)
    import builtins
    from more_itertools import zip_equal

    def zip(*iterables, strict=False):
        if strict:
            return zip_equal(*iterables)
        return builtins.zip(*iterables)
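# Quick check of the shim's semantics (mirrors PEP 618's zip(..., strict=True)):
def _example_zip_strict():
    assert list(zip('ab', [1, 2], strict=True)) == [('a', 1), ('b', 2)]
    try:
        list(zip('abc', [1, 2], strict=True))
    except Exception:  # more_itertools raises UnequalIterablesError here
        pass
    else:
        raise AssertionError('expected a length-mismatch error')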
def _parseValue(key, val):
"""
Parse non-string literals found in darknet config files
"""
if ',' in val:
vals = val.split(',')
raw_list = tuple(_parseValue(key, v) for v in vals)
if key == 'anchors':
# Group the anchors list into pairs
# https://docs.python.org/3.10/library/functions.html#zip
raw_list = list(zip(*[iter(raw_list)] * 2, strict=True))
return raw_list
else:
if '.' in val:
try:
return float(val.strip())
except ValueError:
return val
else:
try:
return int(val.strip())
except ValueError:
return val
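# Illustrative parses (a sketch, not in the original file):
def _example_parse_value():
    assert _parseValue('stride', '2') == 2
    assert _parseValue('hue', '.1') == 0.1
    assert _parseValue('steps', '200,400') == (200, 400)
    # 'anchors' values are additionally grouped into (w, h) pairs:
    assert _parseValue('anchors', '10,13,16,30') == [(10, 13), (16, 30)]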
class multidict(collections.OrderedDict):
"""
A dict subclass that allows for multiple sections in a config file to share
names.
From: https://stackoverflow.com/a/9888814
"""
_unique = 0 # class variable
def __setitem__(self, key, val):
if isinstance(val, dict):
# This should only happen at the top-most level
self._unique += 1
val['_type'] = key
key = self._unique
elif isinstance(val, str):
val = _parseValue(key, val)
super().__setitem__(key, val)
class DNConfigParser(configparser.RawConfigParser):
def __init__(self, **kwargs):
super().__init__(defaults=None,
dict_type=multidict,
strict=False,
**kwargs)
def as_list(self) -> List[Dict[str, str]]:
"""
Converts a ConfigParser object into a list of dictionaries.
Each section becomes a dict of its options as key => value pairs, with
the section name stored under the '_type' key (added by multidict).
"""
the_list = []
for section in self.sections():
the_list.append(dict(self.items(section)))
return the_list
def as_dict(self) -> Dict[str, Dict[str, str]]:
"""
Converts a ConfigParser object into a dictionary.
The resulting dictionary has sections as keys which point to a dict of the
sections options as key => value pairs.
https://stackoverflow.com/a/23944270
"""
the_dict = {}
for section in self.sections():
the_dict[section] = dict(self.items(section))
return the_dict
def convertConfigFile(configfile):
parser = DNConfigParser()
if isinstance(configfile, io.IOBase):
if hasattr(configfile, 'name'):
print(configfile.name)
parser.read_file(configfile, source=configfile.name)
else:
parser.read_file(configfile)
else:
parser.read(configfile)
return parser.as_list()
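# Example of the resulting structure (a sketch; the synthetic '_type' key is
# added by multidict and holds the section name):
def _example_convert_config():
    cfg = io.StringIO('[net]\nheight=448\n[convolutional]\nfilters=64\n')
    assert convertConfigFile(cfg) == [
        {'_type': 'net', 'height': 448},
        {'_type': 'convolutional', 'filters': 64},
    ]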
#!/usr/bin/env python3
"Number the blocks in a DarkNet config file"
from absl import app, flags
from absl.flags import argparse_flags
import argparse
def _makeParser(parser):
parser.add_argument('filename',
default=None,
help='name of the config file. Defaults to YOLOv3',
nargs='?',
type=argparse.FileType('r'))
_parser = argparse_flags.ArgumentParser()
_makeParser(_parser)
def numberConfig(file):
i = 0
for line in file:
if line.startswith('[') and 'net' not in line:
print(f"{i:4d}|{line}", end='')
i += 1
else:
print(f" |{line}", end='')
def main(argv, args=None):
if args is None:
args = _parser.parse_args(argv[1:])
filename = args.filename
if filename is None:
from ..file_manager import download
with open(download('yolov3.cfg')) as file:
numberConfig(file)
else:
numberConfig(filename)
if __name__ == '__main__':
app.run(main)
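# Sample output (a sketch): [net] stays unnumbered, while every other section
# header gets a running index in the left-hand column:
#       |[net]
#       |batch=1
#      0|[convolutional]
#       |filters=64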
"""
This file contains the code to load parsed weights that are in the DarkNet
format into TensorFlow layers
"""
import itertools
from tensorflow import keras as ks
from yolo.modeling.building_blocks import DarkConv
def split_converter(lst, i, j=None):
if j is None:
return lst.data[:i], lst.data[i:]
return lst.data[:i], lst.data[i:j], lst.data[j:]
def interleve_weights(block):
"""merge weights to fit the DarkResnet block style"""
if len(block) == 0:
return []
weights_temp = []
for layer in block:
weights = layer.get_weights()
weights = [tuple(weights[0:3]), tuple(weights[3:])]
weights_temp.append(weights)
top, bottom = tuple(zip(*weights_temp))
weights = list(itertools.chain.from_iterable(top)) + \
list(itertools.chain.from_iterable(bottom))
return weights
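# Shape of the transformation (a sketch with stand-in layers; the 3/rest split
# is an assumption about DarkConv's weight layout, conv weights first):
def _example_interleve():
    class _Fake:
        def __init__(self, w):
            self._w = w
        def get_weights(self):
            return self._w
    block = [_Fake(['c0', 'c1', 'c2', 'b0', 'b1']),
             _Fake(['d0', 'd1', 'd2', 'e0', 'e1'])]
    assert interleve_weights(block) == [
        'c0', 'c1', 'c2', 'd0', 'd1', 'd2', 'b0', 'b1', 'e0', 'e1']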
def get_darknet53_tf_format(net, only_weights=True):
"""convert weights from darknet sequntial to tensorflow weave, Darknet53 Backbone"""
combo_blocks = []
for i in range(2):
layer = net.pop(0)
combo_blocks.append(layer)
# group layers into residual blocks, each terminated by a [shortcut] section
encoder = []
while len(net) != 0:
blocks = []
layer = net.pop(0)
while layer._type != "shortcut":
blocks.append(layer)
layer = net.pop(0)
encoder.append(blocks)
new_net = combo_blocks + encoder
weights = []
if only_weights:
for block in new_net:
if type(block) != list:
weights.append(block.get_weights())
else:
weights.append(interleve_weights(block))
print("converted/interleved weights for tensorflow format")
return new_net, weights
def get_tiny_tf_format(encoder):
weights = []
for layer in encoder:
if layer._type != "maxpool":
weights.append(layer.get_weights())
return encoder, weights
def load_weights_dnBackbone(backbone, encoder, mtype="darknet53"):
# get weights for backbone
if mtype == "darknet53":
encoder, weights_encoder = get_darknet53_tf_format(encoder[:])
elif mtype == "darknet_tiny":
encoder, weights_encoder = get_tiny_tf_format(encoder[:])
# set backbone weights
print(
f"\nno. layers: {len(backbone.layers)}, no. weights: {len(weights_encoder)}"
)
set_darknet_weights(backbone, weights_encoder)
backbone.trainable = False
print(f"\nsetting backbone.trainable to: {backbone.trainable}\n")
return
def load_weights_dnHead(head, decoder, v4=True):
# get weights for head
decoder, weights_decoder, head_layers, head_weights = get_decoder_weights(
decoder)
# set detection head weights
print(
f"\nno. layers: {len(head.layers)}, no. weights: {len(weights_decoder)}"
)
flat_full = list(flatten_model(head, r_list=False))
flat_main = flat_full[:-3]
flat_head = flat_full[-3:]
# not the right way to do it
if v4:
flat_main.insert(1, flat_main[-1])
print(len(flat_main), len(decoder))
print(len(flat_head), len(head_layers))
set_darknet_weights(head, weights_decoder, flat_model=flat_main)
set_darknet_weights_head(flat_head, head_weights)
head.trainable = False
print(f"\nsetting head.trainable to: {head.trainable}\n")
return
# DEBUGGING
def print_layer_shape(layer):
try:
weights = layer.get_weights()
except AttributeError:
weights = layer
for item in weights:
print(item.shape)
return
def flatten_model(model, r_list=True):
    for layer in model.layers:
        if r_list and isinstance(layer, ks.Model):
            # recurse into nested models instead of yielding them whole
            yield from flatten_model(layer)
        else:
            yield layer
def set_darknet_weights_head(flat_head, weights_head):
for layer in flat_head:
weights = layer.get_weights()
for weight in weights:
print(weight.shape)
weight_depth = weights[0].shape[-2]
for weight in weights_head:
if weight[0].shape[-2] == weight_depth:
print(
f"loaded weights for layer: head layer with depth {weight_depth} -> name: {layer.name}",
sep=' ',
end="\r")
layer.set_weights(weight)
return
def set_darknet_weights(model, weights_list, flat_model=None):
if flat_model is None:
zip_fill = flatten_model(model)
else:
zip_fill = flat_model
for i, (layer, weights) in enumerate(zip(zip_fill, weights_list)):
print(layer.name, len(weights))
#layer.set_weights(weights)
return
def split_decoder(lst):
decoder = []
outputs = []
for layer in lst:
if layer._type == 'yolo':
outputs.append(decoder.pop())
outputs.append(layer)
else:
decoder.append(layer)
return decoder, outputs
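# Behavior sketch with stand-in sections: the layer feeding each [yolo] head
# is moved next to it in `outputs`:
def _example_split_decoder():
    class _L:
        def __init__(self, t):
            self._type = t
    seq = [_L('convolutional'), _L('convolutional'), _L('yolo'), _L('route')]
    decoder, outputs = split_decoder(seq)
    assert [l._type for l in decoder] == ['convolutional', 'route']
    assert [l._type for l in outputs] == ['convolutional', 'yolo']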
def get_decoder_weights(decoder):
layers = [[]]
block = []
weights = []
decoder, head = split_decoder(decoder)
# get decoder weights and group them together
for i, layer in enumerate(decoder):
if layer._type == "route" and len(
layer.layers) >= 2 and decoder[i - 1]._type != 'maxpool':
layers.append([])
layers.append(block)
block = []
elif layer._type == "route" and decoder[i - 1]._type != 'maxpool':
layers.append(block)
block = []
elif (layer._type == "route" and decoder[i - 1]._type
== "maxpool") or layer._type == "maxpool":
# made only for spp
continue
elif layer._type == "convolutional":
block.append(layer)
# else:
# # if you upsample
# layers.append([])
if len(block) > 0:
layers.append(block)
# interleve weights for blocked layers
for layer in layers:
weights.append(interleve_weights(layer))
# get weights for output detection heads
head_weights = []
head_layers = []
for layer in (head):
if layer is not None and layer._type == "convolutional":
head_weights.append(layer.get_weights())
head_layers.append(layer)
return layers, weights, head_layers, head_weights
"""
This file contains the code to load parsed weights that are in the DarkNet
format into TensorFlow layers
"""
import itertools
from tensorflow import keras as ks
from collections import defaultdict
from yolo.modeling.building_blocks import DarkConv
from .config_classes import convCFG
def split_converter(lst, i, j=None):
if j is None:
return lst.data[:i], lst.data[i:]
return lst.data[:i], lst.data[i:j], lst.data[j:]
def interleve_weights(block):
"""merge weights to fit the DarkResnet block style"""
if len(block) == 0:
return []
weights_temp = []
for layer in block:
weights = layer.get_weights()
weights = [tuple(weights[0:3]), tuple(weights[3:])]
weights_temp.append(weights)
top, bottom = tuple(zip(*weights_temp))
weights = list(itertools.chain.from_iterable(top)) + \
list(itertools.chain.from_iterable(bottom))
return weights
def get_darknet53_tf_format(net, only_weights=True):
"""convert weights from darknet sequntial to tensorflow weave, Darknet53 Backbone"""
combo_blocks = []
for i in range(2):
layer = net.pop(0)
combo_blocks.append(layer)
# group layers into residual blocks, each terminated by a [shortcut] section
encoder = []
while len(net) != 0:
blocks = []
layer = net.pop(0)
while layer._type != "shortcut":
blocks.append(layer)
layer = net.pop(0)
encoder.append(blocks)
new_net = combo_blocks + encoder
weights = []
if only_weights:
for block in new_net:
if type(block) != list:
weights.append(block.get_weights())
else:
weights.append(interleve_weights(block))
print("converted/interleved weights for tensorflow format")
return new_net, weights
def get_tiny_tf_format(encoder):
weights = []
for layer in encoder:
if layer._type != "maxpool":
weights.append(layer.get_weights())
return encoder, weights
# DEBUGGING
def print_layer_shape(layer):
try:
weights = layer.get_weights()
except AttributeError:
weights = layer
for item in weights:
print(item.shape)
return
def flatten_model(model):
    for layer in model.layers:
        if isinstance(layer, ks.Model):
            # recurse into nested models instead of yielding them whole
            yield from flatten_model(layer)
        else:
            yield layer
def set_darknet_weights_head(flat_head, weights_head):
for layer in flat_head:
weights = layer.get_weights()
weight_depth = weights[0].shape[-2]
for weight in weights_head:
if weight[0].shape[-2] == weight_depth:
print(
f"loaded weights for layer: head layer with depth {weight_depth} -> name: {layer.name}",
sep=' ',
end="\r")
layer.set_weights(weight)
return
def set_darknet_weights(model, weights_list, flat_model=None):
if flat_model is None:
zip_fill = flatten_model(model)
else:
zip_fill = flat_model
for i, (layer, weights) in enumerate(zip(zip_fill, weights_list)):
print(f"loaded weights for layer: {i} -> name: {layer.name}",
sep=' ',
end="\r")
layer.set_weights(weights)
return
def split_decoder(lst):
decoder = []
outputs = []
for layer in lst:
if layer._type == 'yolo':
outputs.append(decoder.pop())
outputs.append(layer)
else:
decoder.append(layer)
return decoder, outputs
def get_decoder_weights(decoder):
layers = [[]]
block = []
weights = []
decoder, head = split_decoder(decoder)
# get decoder weights and group them together
for i, layer in enumerate(decoder):
if layer._type == "route" and decoder[i - 1]._type != 'maxpool':
layers.append(block)
block = []
elif (layer._type == "route" and decoder[i - 1]._type
== "maxpool") or layer._type == "maxpool":
continue
elif layer._type == "convolutional":
block.append(layer)
else:
layers.append([])
if len(block) > 0:
layers.append(block)
# interleve weights for blocked layers
for layer in layers:
weights.append(interleve_weights(layer))
# get weights for output detection heads
head_weights = []
head_layers = []
for layer in (head):
if layer is not None and layer._type == "convolutional":
head_weights.append(layer.get_weights())
head_layers.append(layer)
return layers, weights, head_layers, head_weights
def load_weights_backbone(model, net):
convs = []
for layer in net:
if isinstance(layer, convCFG):
convs.append(layer)
for layer in model.layers:
if isinstance(layer, DarkConv):
cfg = convs.pop(0)
layer.set_weights(cfg.get_weights())
else:
for sublayer in layer.submodules:
if isinstance(sublayer, DarkConv):
cfg = convs.pop(0)
sublayer.set_weights(cfg.get_weights())
def load_weights_v4head(model, net):
convs = []
for layer in net:
if isinstance(layer, convCFG):
convs.append(layer)
blocks = []
for layer in model.layers:
if isinstance(layer, DarkConv):
blocks.append([layer])
else:
block = []
for sublayer in layer.submodules:
if isinstance(sublayer, DarkConv):
block.append(sublayer)
if block:
blocks.append(block)
# 4 and 0 have the same shape
remap = [4, 6, 0, 1, 7, 2, 3, 5]
old_blocks = blocks
blocks = [old_blocks[i] for i in remap]
for block in blocks:
for layer in block:
cfg = convs.pop(0)
print(cfg) #, layer.input_shape)
layer.set_weights(cfg.get_weights())
print()
print(convs)
def load_weights_dnBackbone(backbone, encoder, mtype="darknet53"):
# get weights for backbone
if mtype == "DarkNet53":
encoder, weights_encoder = get_darknet53_tf_format(encoder[:])
elif mtype == "DarkNetTiny":
encoder, weights_encoder = get_tiny_tf_format(encoder[:])
# set backbone weights
print(
f"\nno. layers: {len(backbone.layers)}, no. weights: {len(weights_encoder)}"
)
set_darknet_weights(backbone, weights_encoder)
backbone.trainable = False
print(f"\nsetting backbone.trainable to: {backbone.trainable}\n")
return
def load_weights_dnHead(head, decoder):
# get weights for head
decoder, weights_decoder, head_layers, head_weights = get_decoder_weights(
decoder)
# set detection head weights
print(
f"\nno. layers: {len(head.layers)}, no. weights: {len(weights_decoder)}"
)
flat_full = list(flatten_model(head))
flat_main = flat_full[:-3]
flat_head = flat_full[-3:]
set_darknet_weights(head, weights_decoder, flat_model=flat_main)
set_darknet_weights_head(flat_head, head_weights)
head.trainable = False
print(f"\nsetting head.trainable to: {head.trainable}\n")
return
from collections import defaultdict
from official.vision.beta.projects.yolo.modeling.layers.nn_blocks import DarkConv
from .config_classes import convCFG
def split_converter(lst, i, j=None):
if j is None:
    return lst.data[:i], lst.data[i:]
return lst.data[:i], lst.data[i:j], lst.data[j:]
def load_weights(convs, layers):
min_key = min(layers.keys())
max_key = max(layers.keys())
for i in range(min_key, max_key + 1):
try:
cfg = convs.pop(0)
print(cfg.c, cfg.filters, layers[i]._filters)
layers[i].set_weights(cfg.get_weights())
except Exception as e:
    print(f"an error has occurred, {layers[i].name}, {i}: {e}")
def load_weights_backbone(model, net):
convs = []
for layer in net:
if isinstance(layer, convCFG):
convs.append(layer)
layers = dict()
base_key = 0
alternate = 0
for layer in model.layers:
# non sub module conv blocks
if isinstance(layer, DarkConv):
if base_key + alternate not in layers.keys():
layers[base_key + alternate] = layer
else:
base_key += 1
layers[base_key + alternate] = layer
print(base_key + alternate, layer.name)
base_key += 1
else:
#base_key = max(layers.keys())
for sublayer in layer.submodules:
if isinstance(sublayer, DarkConv):
if sublayer.name == "dark_conv":
key = 0
else:
key = int(sublayer.name.split("_")[-1])
layers[key + base_key] = sublayer
print(key + base_key, sublayer.name)
if key > alternate:
alternate = key
#alternate += 1
load_weights(convs, layers)
return
def ishead(out_conv, layer):
    return layer.filters == out_conv
def load_head(model, net, out_conv=255):
convs = []
cfg_heads = []
for layer in net:
if isinstance(layer, convCFG):
if not ishead(out_conv, layer):
convs.append(layer)
else:
cfg_heads.append(layer)
layers = dict()
heads = dict()
for layer in model.layers:
# non sub module conv blocks
if isinstance(layer, DarkConv):
if layer.name == "dark_conv":
key = 0
else:
key = int(layer.name.split("_")[-1])
if ishead(out_conv, layer):
heads[key] = layer
else:
layers[key] = layer
else:
for sublayer in layer.submodules:
if isinstance(sublayer, DarkConv):
if sublayer.name == "dark_conv":
key = 0
else:
key = int(sublayer.name.split("_")[-1])
if ishead(out_conv, sublayer):
heads[key] = sublayer
else:
layers[key] = sublayer
print(key, sublayer.name)
load_weights(convs, layers)
load_weights(cfg_heads, heads)
return
def load_weights_v4head(model, net, remap):
convs = []
for layer in net:
if isinstance(layer, convCFG):
convs.append(layer)
layers = dict()
base_key = 0
for layer in model.layers:
if isinstance(layer, DarkConv):
if layer.name == "dark_conv":
key = 0
else:
key = int(layer.name.split("_")[-1])
layers[key] = layer
base_key += 1
print(base_key, layer.name)
else:
for sublayer in layer.submodules:
if isinstance(sublayer, DarkConv):
if sublayer.name == "dark_conv":
key = 0 + base_key
else:
key = int(sublayer.name.split("_")[-1]) + base_key
layers[key] = sublayer
print(key, sublayer.name)
"""
This file contains the code to parse DarkNet weight files.
"""
import io
import numpy as np
import os
from typing import Union
from .config_classes import *
from .dn2dicts import convertConfigFile
from ..file_manager import PathABC, get_size, open_if_not_open
def build_layer(layer_dict, file, net):
"""consturct layer and load weights from file"""
layer = layer_builder[layer_dict['_type']].from_dict(net, layer_dict)
bytes_read = 0
if file is not None:
bytes_read = layer.load_weights(file)
return layer, bytes_read
def read_file(full_net, config, weights=None):
"""read the file and construct weights net list"""
bytes_read = 0
if weights is not None:
major, minor, revision = read_n_int(3, weights)
bytes_read += 12
if major * 10 + minor >= 2:
    print("64-bit seen counter")
    iseen = read_n_long(1, weights, unsigned=True)[0]
    bytes_read += 8
else:
    print("32-bit seen counter")
    iseen = read_n_int(1, weights, unsigned=True)[0]
    bytes_read += 4
print(f"major: {major}")
print(f"minor: {minor}")
print(f"revision: {revision}")
print(f"iseen: {iseen}")
for i, layer_dict in enumerate(config):
try:
layer, num_read = build_layer(layer_dict, weights, full_net)
except Exception as e:
raise ValueError(f"Cannot read weights for layer [#{i}]") from e
full_net.append(layer)
bytes_read += num_read
print(f"{bytes_read} {layer}")
return bytes_read
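# read_n_int / read_n_long come from config_classes via the star import above.
# A minimal equivalent is sketched here for reference (an assumption about
# their behavior, matching the little-endian DarkNet header layout):
def _read_n_int_sketch(n, file, unsigned=False):
    import struct
    fmt = ('I' if unsigned else 'i') * n
    return list(struct.unpack('<' + fmt, file.read(4 * n)))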
def read_weights(full_net, config_file, weights_file):
if weights_file is None:
with open_if_not_open(config_file) as config:
config = convertConfigFile(config)
read_file(full_net, config)
return full_net
size = get_size(weights_file)
with open_if_not_open(config_file) as config, \
open_if_not_open(weights_file, "rb") as weights:
config = convertConfigFile(config)
bytes_read = read_file(full_net, config, weights)
print('full net: ')
for e in full_net:
print(f"{e.w} {e.h} {e.c}\t{e}")
print(
f"bytes_read: {bytes_read}, original_size: {size}, final_position: {weights.tell()}"
)
"""
if (bytes_read != size):
raise IOError('error reading weights file')
"""
[net] #0
batch=1
subdivisions=1
height=448
width=448
channels=3
momentum=0.9
decay=0.0005
saturation=1.5
exposure=1.5
hue=.1
learning_rate=0.0005
policy=steps
steps=200,400,600,20000,30000
scales=2.5,2,2,.1,.1
max_batches = 40000
[convolutional] #1
batch_normalize=1
filters=64
size=7
stride=2
pad=1
activation=leaky
[maxpool] #2
size=2
stride=2
[convolutional] #3
batch_normalize=1
filters=192
size=3
stride=1
pad=1
activation=leaky
[maxpool] #4
size=24
stride=2
[convolutional] #5
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional] #6
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional] #7
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional] #8
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[maxpool] #9
size=2
stride=2
[convolutional] #10
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional] #11
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[convolutional] #12
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional] #13
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[convolutional] #14
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional] #15
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[convolutional] #16
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional] #17
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[convolutional] #18
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional] #19
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[maxpool] #20
size=2
stride=2
[convolutional] #21
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional] #22
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[convolutional] #23
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional] #24
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
#######
[convolutional] #25
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional] #26
batch_normalize=1
size=3
stride=2
pad=1
filters=1024
activation=leaky
[convolutional] #27
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional] #28
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[local] #29
size=3
stride=1
pad=1
filters=256
activation=leaky
[dropout] #30
probability=.5
[connected] #31
output= 1715
activation=linear
[detection] #32
classes=20
coords=4
rescore=1
side=7
num=3
softmax=0
sqrt=1
jitter=.2
object_scale=1
noobject_scale=.5
class_scale=1
coord_scale=5
\ No newline at end of file
"""
Manage the downloading of external files that are used in YOLO networks.
"""
# from __future__ import annotations
import io
import os
from typing import Union
# define PathABC type
try:
PathABC = Union[bytes, str, os.PathLike]
except AttributeError:
# os.PathLike does not exist before Python 3.6
import pathlib
PathABC = Union[bytes, str, pathlib.Path]
def get_size(path: Union[PathABC, io.IOBase]) -> int:
"""
A unified method to find the size of a file, either by its path or an open
file object.
Arguments:
path: a path (as a str or a Path object) or an open file (which must be
seekable)
Return:
size of the file
Raises:
ValueError: the IO object given as path is not open or is not seekable
FileNotFoundError: the given path is invalid
"""
if isinstance(path, io.IOBase):
if path.seekable():
currentPos = path.tell()
path.seek(-1, io.SEEK_END)
size = path.tell() + 1
path.seek(currentPos)
return size
else:
raise ValueError(
"IO object must be seekable in order to find the size.")
else:
return os.path.getsize(path)
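# Both call styles give the same answer (sketch):
def _example_get_size():
    assert get_size(io.BytesIO(b'12345')) == 5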
def open_if_not_open(file: Union[PathABC, io.IOBase], *args,
**kwargs) -> io.IOBase:
"""
Takes an input that can either be a file or a path. If the input is given as
a file, it is returned without modification. If it is a path, it is opened
as a file.
Arguments:
file: a path or file that is being opened if it already isn't
*args, **kwargs: additional arguments or keywords passed through to the
                 open builtin; consult it for details.
Returns:
opened file
Raises:
IOError: consult the open builtin function for the potential IO errors
that can be raised when opening the file
"""
if isinstance(file, io.IOBase):
return file
return open(file, *args, **kwargs)
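# A file object passes through untouched (sketch):
def _example_open_if_not_open():
    buf = io.StringIO('abc')
    assert open_if_not_open(buf) is buf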
# URL names that can be accessed using the download function
urls = {
'yolov1.cfg':
('https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov1.cfg',
'cfg',
'8b4b951dd646478ea4214cb389d152793ca98e5c6e67266884908ba084b6211e'),
'yolov2.cfg':
('https://raw.githubusercontent.com/AlexeyAB/darknet/master/cfg/yolov2.cfg',
'cfg',
'57d85d77262c840b56ad5418faae4950d9c7727e0192fb70618eeaac26a19817'),
'yolov3.cfg':
('https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov3.cfg',
'cfg',
'22489ea38575dfa36c67a90048e8759576416a79d32dc11e15d2217777b9a953'),
'yolov3-spp.cfg':
('https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov3-spp.cfg',
'cfg',
'7a4ec2d7427340fb12059f2b0ef76d6fcfcac132cc287cbbf0be5e3abaa856fd'),
'yolov3-tiny.cfg':
('https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov3-tiny.cfg',
'cfg',
'84eb7a675ef87c906019ff5a6e0effe275d175adb75100dcb47f0727917dc2c7'),
'yolov4.cfg':
('https://raw.githubusercontent.com/AlexeyAB/darknet/master/cfg/yolov4.cfg',
'cfg',
'a6d0f8e5c62cc8378384f75a8159b95fa2964d4162e33351b00ac82e0fc46a34'),
'yolov4-tiny.cfg':
('https://raw.githubusercontent.com/AlexeyAB/darknet/master/cfg/yolov4-tiny.cfg',
'cfg',
'6cbf5ece15235f66112e0bedebb324f37199b31aee385b7e18f0bbfb536b258e'),
'yolov1.weights':
('http://pjreddie.com/media/files/yolov1/yolov1.weights', 'weights',
'df414df832ed10e3f2788df1e0e0ae573976573b97b5eec2c824ab9e5a8ae6d6'),
'yolov2.weights':
('https://pjreddie.com/media/files/yolov2.weights', 'weights',
'd9945162ed6f54ce1a901e3ec537bdba4d572ecae7873087bd730e5a7942df3f'),
'yolov3.weights':
('https://pjreddie.com/media/files/yolov3.weights', 'weights',
'523e4e69e1d015393a1b0a441cef1d9c7659e3eb2d7e15f793f060a21b32f297'),
'yolov3-spp.weights':
('https://pjreddie.com/media/files/yolov3-spp.weights', 'weights',
'87a1e8c85c763316f34e428f2295e1db9ed4abcec59dd9544f8052f50de327b4'),
'yolov3-tiny.weights':
('https://pjreddie.com/media/files/yolov3-tiny.weights', 'weights',
'dccea06f59b781ec1234ddf8d1e94b9519a97f4245748a7d4db75d5b7080a42c'),
'yolov4.weights':
('https://github.com/AlexeyAB/darknet/releases/download/darknet_yolo_v3_optimal/yolov4.weights',
'weights',
'e8a4f6c62188738d86dc6898d82724ec0964d0eb9d2ae0f0a9d53d65d108d562'),
'yolov4-tiny.weights':
('https://github.com/AlexeyAB/darknet/releases/download/darknet_yolo_v4_pre/yolov4-tiny.weights',
'weights',
'037676f0d929c24e1bd9a0037fe30dc416fc26e0ca2a4491a44d024873316061'),
}
def download(name: str, trust: bool = False) -> str:
"""
Download a predefined file named `name` from the original repository.
For example, yolov3.weights holds the pretrained YOLOv3 model, so it is
fetched from https://pjreddie.com/media/files/yolov3.weights.
Args:
name: Name of the file that will be downloaded
trust: Trust the cache even if the file's hash is inconsistent
This option can speed the loading of the file at the expense of
security. Default value is False.
Returns:
The path of the downloaded file as a `str`
Raises:
KeyError: Name of file is not found in the `urls` variable.
ValueError: Name or URL stored in the `urls` variable is invalid.
OSError: There was a problem saving the file when it was being
downloaded.
HTTPException: The file was not able to be downloaded.
Exception: any other undocumented error that ks.utils.get_file may
raise to indicate that the file was inaccessible.
"""
import tensorflow.keras as ks
from http.client import HTTPException
url, type, hash = urls[name]
cache_dir = os.path.abspath('cache')
full_path = os.path.join(cache_dir, type, name)
if trust and os.path.exists(full_path):
return full_path
try:
if hash is None:
return ks.utils.get_file(name,
url,
cache_dir=cache_dir,
cache_subdir=type)
else:
return ks.utils.get_file(name,
url,
cache_dir=cache_dir,
cache_subdir=type,
file_hash=hash,
hash_algorithm='sha256')
except Exception as e:
if 'URL fetch failure on' in str(e):
raise HTTPException(str(e)) from e
else:
raise
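# Typical usage (requires network access on the first call; files are cached
# under ./cache):
def _example_download():
    cfg_path = download('yolov3.cfg')
    weights_path = download('yolov3.weights')
    return cfg_path, weights_path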
from absl.testing import parameterized
import tensorflow as tf
import numpy as np
try:
    from importlib import resources as importlib_resources
except ImportError:
    # Shim for Python 3.6 and older
    import importlib_resources
from official.vision.beta.projects.yolo.modeling.backbones.darknet import Darknet
from yolo.utils._darknet2tf import DarkNetConverter
from yolo.utils._darknet2tf.load_weights2 import load_weights_backbone
class darknet2tf_test(tf.test.TestCase, parameterized.TestCase):
def test_load_yolov3_weights(self):
x = tf.ones(shape=[1, 224, 224, 3], dtype=tf.float32)
model = Darknet(model_id='darknettiny')
encoder = DarkNetConverter.read('cache/cfg/yolov3-tiny.cfg', 'cache/weights/yolov3-tiny.weights')
encoder = encoder[:12]  # keep only the backbone sections
load_weights_backbone(model, encoder)
y: tf.Tensor = model(x)
if __name__ == "__main__":
tf.test.main()