Commit 08ec7955 authored by anivegesana

Remove dead files

parent 230732b2
"""
This file contains the code to load parsed weights that are in the DarkNet
format into TensorFlow layers
"""
import itertools
from tensorflow import keras as ks
from collections import defaultdict
from yolo.modeling.building_blocks import DarkConv
from .config_classes import convCFG
def split_converter(lst, i, j=None):
if j is None:
return lst.data[:i], lst.data[i:]
return lst.data[:i], lst.data[i:j], lst.data[j:]
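# Example (illustrative, not part of the original file): for a converter
# whose .data holds [l0, l1, l2, l3, l4],
#   split_converter(net, 2)    -> ([l0, l1], [l2, l3, l4])
#   split_converter(net, 2, 4) -> ([l0, l1], [l2, l3], [l4])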
def interleve_weights(block):
"""merge weights to fit the DarkResnet block style"""
if len(block) == 0:
return []
weights_temp = []
for layer in block:
weights = layer.get_weights()
weights = [tuple(weights[0:3]), tuple(weights[3:])]
weights_temp.append(weights)
top, bottom = tuple(zip(*weights_temp))
weights = list(itertools.chain.from_iterable(top)) + \
list(itertools.chain.from_iterable(bottom))
return weights
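# Illustrative sketch (added for clarity, not part of the original file):
# assuming each DarkConv layer reports five tensors
# [kernel, gamma, beta, moving_mean, moving_variance], interleve_weights
# emits the first three tensors of every layer, then the remaining tensors
# of every layer. A self-contained check with stand-in layers:
def _demo_interleve_weights():
  import numpy as np

  class _FakeLayer:
    """Stand-in for a DarkConv layer; only get_weights() is used."""

    def __init__(self, base):
      # five tensors per layer: [kernel, gamma, beta, mean, variance]
      self._weights = [np.array([base + i]) for i in range(5)]

    def get_weights(self):
      return self._weights

  merged = interleve_weights([_FakeLayer(0), _FakeLayer(10)])
  # the "tops" of both layers come first, then the "bottoms" of both layers
  assert [w[0] for w in merged] == [0, 1, 2, 10, 11, 12, 3, 4, 13, 14]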
def get_darknet53_tf_format(net, only_weights=True):
"""convert weights from darknet sequntial to tensorflow weave, Darknet53 Backbone"""
combo_blocks = []
for i in range(2):
layer = net.pop(0)
combo_blocks.append(layer)
  # group the remaining layers into residual blocks: collect layers up to
  # each 'shortcut' marker (the shortcut itself is consumed and dropped)
encoder = []
while len(net) != 0:
blocks = []
layer = net.pop(0)
while layer._type != "shortcut":
blocks.append(layer)
layer = net.pop(0)
encoder.append(blocks)
new_net = combo_blocks + encoder
weights = []
  if only_weights:
    for block in new_net:
      if not isinstance(block, list):
        weights.append(block.get_weights())
      else:
        weights.append(interleve_weights(block))
    print("converted/interleaved weights for tensorflow format")
return new_net, weights
def get_tiny_tf_format(encoder):
weights = []
for layer in encoder:
if layer._type != "maxpool":
weights.append(layer.get_weights())
return encoder, weights
# DEBUGGING
def print_layer_shape(layer):
  try:
    weights = layer.get_weights()
  except AttributeError:
    # `layer` may already be a plain list of weight arrays
    weights = layer
  for item in weights:
    print(item.shape)
  return
def flatten_model(model):
  """Recursively yield the layers of `model`, flattening nested sub-models."""
  for layer in model.layers:
    if isinstance(layer, ks.Model):
      yield from flatten_model(layer)
    else:
      yield layer
def set_darknet_weights_head(flat_head, weights_head):
for layer in flat_head:
weights = layer.get_weights()
weight_depth = weights[0].shape[-2]
for weight in weights_head:
if weight[0].shape[-2] == weight_depth:
print(
f"loaded weights for layer: head layer with depth {weight_depth} -> name: {layer.name}",
sep=' ',
end="\r")
layer.set_weights(weight)
return
def set_darknet_weights(model, weights_list, flat_model=None):
  if flat_model is None:
zip_fill = flatten_model(model)
else:
zip_fill = flat_model
for i, (layer, weights) in enumerate(zip(zip_fill, weights_list)):
print(f"loaded weights for layer: {i} -> name: {layer.name}",
sep=' ',
end="\r")
layer.set_weights(weights)
return
def split_decoder(lst):
decoder = []
outputs = []
for layer in lst:
if layer._type == 'yolo':
outputs.append(decoder.pop())
outputs.append(layer)
else:
decoder.append(layer)
return decoder, outputs
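# Example (illustrative): for a decoder list
#   [c1, c2, c3, yolo1, route, c4, c5, yolo2]
# split_decoder returns
#   decoder = [c1, c2, route, c4]
#   outputs = [c3, yolo1, c5, yolo2]
# i.e. each detection conv is pulled out and paired with its 'yolo' layer.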
def get_decoder_weights(decoder):
layers = [[]]
block = []
weights = []
decoder, head = split_decoder(decoder)
# get decoder weights and group them together
for i, layer in enumerate(decoder):
if layer._type == "route" and decoder[i - 1]._type != 'maxpool':
layers.append(block)
block = []
elif (layer._type == "route" and decoder[i - 1]._type
== "maxpool") or layer._type == "maxpool":
continue
elif layer._type == "convolutional":
block.append(layer)
else:
layers.append([])
if len(block) > 0:
layers.append(block)
# interleve weights for blocked layers
for layer in layers:
weights.append(interleve_weights(layer))
# get weights for output detection heads
head_weights = []
head_layers = []
  for layer in head:
    if layer is not None and layer._type == "convolutional":
head_weights.append(layer.get_weights())
head_layers.append(layer)
return layers, weights, head_layers, head_weights
def load_weights_backbone(model, net):
convs = []
for layer in net:
if isinstance(layer, convCFG):
convs.append(layer)
for layer in model.layers:
if isinstance(layer, DarkConv):
cfg = convs.pop(0)
layer.set_weights(cfg.get_weights())
else:
for sublayer in layer.submodules:
if isinstance(sublayer, DarkConv):
cfg = convs.pop(0)
sublayer.set_weights(cfg.get_weights())
def load_weights_v4head(model, net):
convs = []
for layer in net:
if isinstance(layer, convCFG):
convs.append(layer)
blocks = []
for layer in model.layers:
if isinstance(layer, DarkConv):
blocks.append([layer])
else:
block = []
for sublayer in layer.submodules:
if isinstance(sublayer, DarkConv):
block.append(sublayer)
if block:
blocks.append(block)
  # blocks 4 and 0 have the same weight shapes, so an explicit remap is needed
remap = [4, 6, 0, 1, 7, 2, 3, 5]
old_blocks = blocks
blocks = [old_blocks[i] for i in remap]
for block in blocks:
for layer in block:
cfg = convs.pop(0)
print(cfg) #, layer.input_shape)
layer.set_weights(cfg.get_weights())
print()
print(convs)
def load_weights_dnBackbone(backbone, encoder, mtype="DarkNet53"):
  # get weights for backbone (the original default "darknet53" matched
  # neither branch below, leaving weights_encoder unbound)
  if mtype == "DarkNet53":
    encoder, weights_encoder = get_darknet53_tf_format(encoder[:])
  elif mtype == "DarkNetTiny":
    encoder, weights_encoder = get_tiny_tf_format(encoder[:])
  else:
    raise ValueError(f"unsupported backbone type: {mtype}")
# set backbone weights
print(
f"\nno. layers: {len(backbone.layers)}, no. weights: {len(weights_encoder)}"
)
set_darknet_weights(backbone, weights_encoder)
backbone.trainable = False
print(f"\nsetting backbone.trainable to: {backbone.trainable}\n")
return
def load_weights_dnHead(head, decoder):
# get weights for head
decoder, weights_decoder, head_layers, head_weights = get_decoder_weights(
decoder)
# set detection head weights
print(
f"\nno. layers: {len(head.layers)}, no. weights: {len(weights_decoder)}"
)
flat_full = list(flatten_model(head))
flat_main = flat_full[:-3]
flat_head = flat_full[-3:]
set_darknet_weights(head, weights_decoder, flat_model=flat_main)
set_darknet_weights_head(flat_head, head_weights)
head.trainable = False
print(f"\nsetting head.trainable to: {head.trainable}\n")
return
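# Usage sketch (illustrative; the converter class and split index are
# assumptions based on how these helpers are used elsewhere in the repo):
#
#   net = DarkNetConverter.read('cache/cfg/yolov3.cfg',
#                               'cache/weights/yolov3.weights')
#   encoder, decoder = split_converter(net, i)  # i = last backbone layer
#   load_weights_dnBackbone(backbone, encoder, mtype="DarkNet53")
#   load_weights_dnHead(head, decoder)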
from collections import defaultdict
from official.vision.beta.projects.yolo.modeling.layers.nn_blocks import DarkConv
from .config_classes import convCFG
def split_converter(lst, i, j=None):
  if j is None:
    return lst.data[:i], lst.data[i:]
  return lst.data[:i], lst.data[i:j], lst.data[j:]
def load_weights(convs, layers):
  min_key = min(layers.keys())
  max_key = max(layers.keys())
  for i in range(min_key, max_key + 1):
    try:
      cfg = convs.pop(0)
      print(cfg.c, cfg.filters, layers[i]._filters)
      layers[i].set_weights(cfg.get_weights())
    except Exception as e:
      name = layers[i].name if i in layers else "<missing>"
      print(f"an error has occurred ({e}), {name}, {i}")
def load_weights_backbone(model, net):
convs = []
for layer in net:
if isinstance(layer, convCFG):
convs.append(layer)
layers = dict()
base_key = 0
alternate = 0
for layer in model.layers:
    # conv blocks that live at the top level, not inside a submodule
if isinstance(layer, DarkConv):
if base_key + alternate not in layers.keys():
layers[base_key + alternate] = layer
else:
base_key += 1
layers[base_key + alternate] = layer
print(base_key + alternate, layer.name)
base_key += 1
else:
#base_key = max(layers.keys())
for sublayer in layer.submodules:
if isinstance(sublayer, DarkConv):
if sublayer.name == "dark_conv":
key = 0
else:
key = int(sublayer.name.split("_")[-1])
layers[key + base_key] = sublayer
print(key + base_key, sublayer.name)
if key > alternate:
alternate = key
#alternate += 1
load_weights(convs, layers)
return
def ishead(out_conv, layer):
if layer.filters == out_conv:
return True
return False
def load_head(model, net, out_conv=255):
convs = []
cfg_heads = []
for layer in net:
if isinstance(layer, convCFG):
if not ishead(out_conv, layer):
convs.append(layer)
else:
cfg_heads.append(layer)
layers = dict()
heads = dict()
for layer in model.layers:
    # conv blocks that live at the top level, not inside a submodule
if isinstance(layer, DarkConv):
if layer.name == "dark_conv":
key = 0
else:
key = int(layer.name.split("_")[-1])
if ishead(out_conv, layer):
heads[key] = layer
else:
layers[key] = layer
else:
for sublayer in layer.submodules:
if isinstance(sublayer, DarkConv):
if sublayer.name == "dark_conv":
key = 0
else:
key = int(sublayer.name.split("_")[-1])
if ishead(out_conv, sublayer):
heads[key] = sublayer
else:
layers[key] = sublayer
print(key, sublayer.name)
load_weights(convs, layers)
load_weights(cfg_heads, heads)
return
def load_weights_v4head(model, net, remap):
convs = []
for layer in net:
if isinstance(layer, convCFG):
convs.append(layer)
layers = dict()
base_key = 0
for layer in model.layers:
if isinstance(layer, DarkConv):
if layer.name == "dark_conv":
key = 0
else:
key = int(layer.name.split("_")[-1])
layers[key] = layer
base_key += 1
print(base_key, layer.name)
    else:
      for sublayer in layer.submodules:
        if isinstance(sublayer, DarkConv):
          if sublayer.name == "dark_conv":
            key = 0 + base_key
          else:
            key = int(sublayer.name.split("_")[-1]) + base_key
          layers[key] = sublayer
          print(key, sublayer.name)
  # The original file left this function unfinished: `remap` was never
  # applied and no weights were ever loaded. A minimal sketch of the
  # evident intent, mirroring the hard-coded remap in the earlier version
  # of load_weights_v4head (an assumption, not verified against a model):
  keys = sorted(layers.keys())
  layers = {i: layers[keys[r]] for i, r in enumerate(remap)}
  load_weights(convs, layers)
"""
This file contains the code to parse DarkNet weight files.
"""
import io
import numpy as np
import os
from typing import Union
from .config_classes import *
from .dn2dicts import convertConfigFile
from ..file_manager import PathABC, get_size, open_if_not_open
def build_layer(layer_dict, file, net):
"""consturct layer and load weights from file"""
layer = layer_builder[layer_dict['_type']].from_dict(net, layer_dict)
bytes_read = 0
if file is not None:
bytes_read = layer.load_weights(file)
return layer, bytes_read
def read_file(full_net, config, weights=None):
"""read the file and construct weights net list"""
bytes_read = 0
if weights is not None:
major, minor, revision = read_n_int(3, weights)
bytes_read += 12
    # versions >= 0.2 store the `seen` counter as a 64-bit integer
    if (major * 10 + minor) >= 2:
      print("64-bit seen")
      iseen = read_n_long(1, weights, unsigned=True)[0]
      bytes_read += 8
    else:
      print("32-bit seen")
      iseen = read_n_int(1, weights, unsigned=True)[0]
bytes_read += 4
print(f"major: {major}")
print(f"minor: {minor}")
print(f"revision: {revision}")
print(f"iseen: {iseen}")
for i, layer_dict in enumerate(config):
try:
layer, num_read = build_layer(layer_dict, weights, full_net)
except Exception as e:
raise ValueError(f"Cannot read weights for layer [#{i}]") from e
full_net.append(layer)
bytes_read += num_read
print(f"{bytes_read} {layer}")
return bytes_read
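# For reference, the weight-file header consumed by read_file above is:
#   int32 major, int32 minor, int32 revision       (12 bytes)
#   uint64 seen  when major * 10 + minor >= 2      (8 bytes)
#   uint32 seen  otherwise                         (4 bytes)
# A minimal standalone peek at the header using only the standard library
# (an illustrative sketch, independent of the reader helpers above):
def _peek_darknet_header(path):
  import struct
  with open(path, 'rb') as f:
    major, minor, revision = struct.unpack('<3i', f.read(12))
    if major * 10 + minor >= 2:
      (seen,) = struct.unpack('<Q', f.read(8))
    else:
      (seen,) = struct.unpack('<I', f.read(4))
  return major, minor, revision, seen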
def read_weights(full_net, config_file, weights_file):
if weights_file is None:
with open_if_not_open(config_file) as config:
config = convertConfigFile(config)
read_file(full_net, config)
return full_net
size = get_size(weights_file)
with open_if_not_open(config_file) as config, \
open_if_not_open(weights_file, "rb") as weights:
config = convertConfigFile(config)
bytes_read = read_file(full_net, config, weights)
print('full net: ')
for e in full_net:
print(f"{e.w} {e.h} {e.c}\t{e}")
print(
f"bytes_read: {bytes_read}, original_size: {size}, final_position: {weights.tell()}"
)
"""
if (bytes_read != size):
raise IOError('error reading weights file')
"""
[net] #0
batch=1
subdivisions=1
height=448
width=448
channels=3
momentum=0.9
decay=0.0005
saturation=1.5
exposure=1.5
hue=.1
learning_rate=0.0005
policy=steps
steps=200,400,600,20000,30000
scales=2.5,2,2,.1,.1
max_batches = 40000
[convolutional] #1
batch_normalize=1
filters=64
size=7
stride=2
pad=1
activation=leaky
[maxpool] #2
size=2
stride=2
[convolutional] #3
batch_normalize=1
filters=192
size=3
stride=1
pad=1
activation=leaky
[maxpool] #4
size=2
stride=2
[convolutional] #5
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional] #6
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[convolutional] #7
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional] #8
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[maxpool] #9
size=2
stride=2
[convolutional] #10
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional] #11
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[convolutional] #12
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional] #13
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[convolutional] #14
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional] #15
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[convolutional] #16
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional] #17
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[convolutional] #18
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional] #19
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[maxpool] #20
size=2
stride=2
[convolutional] #21
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional] #22
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[convolutional] #23
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional] #24
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
#######
[convolutional] #25
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional] #26
batch_normalize=1
size=3
stride=2
pad=1
filters=1024
activation=leaky
[convolutional] #27
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional] #28
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[local] #29
size=3
stride=1
pad=1
filters=256
activation=leaky
[dropout] #30
probability=.5
[connected] #31
output= 1715
activation=linear
[detection] #32
classes=20
coords=4
rescore=1
side=7
num=3
softmax=0
sqrt=1
jitter=.2
object_scale=1
noobject_scale=.5
class_scale=1
coord_scale=5
"""
Manage the downloading of external files that are used in YOLO networks.
"""
# from __future__ import annotations
import io
import os
from typing import Union
# define PathABC type
try:
PathABC = Union[bytes, str, os.PathLike]
except AttributeError:
  # Python < 3.6: os.PathLike does not exist yet
import pathlib
PathABC = Union[bytes, str, pathlib.Path]
def get_size(path: Union[PathABC, io.IOBase]) -> int:
"""
A unified method to find the size of a file, either by its path or an open
file object.
Arguments:
path: a path (as a str or a Path object) or an open file (which must be
seekable)
Return:
size of the file
Raises:
    ValueError: the IO object given as path is not open or is not seekable
FileNotFoundError: the given path is invalid
"""
  if isinstance(path, io.IOBase):
    if path.seekable():
      currentPos = path.tell()
      # text-mode files only support SEEK_END with a zero offset, and this
      # form also handles empty files correctly
      path.seek(0, io.SEEK_END)
      size = path.tell()
      path.seek(currentPos)
      return size
else:
raise ValueError(
"IO object must be seekable in order to find the size.")
else:
return os.path.getsize(path)
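# Example usage (illustrative): both forms report the same size.
#   size_on_disk = get_size('cache/weights/yolov3.weights')
#   with open('cache/weights/yolov3.weights', 'rb') as f:
#     assert get_size(f) == size_on_disk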
def open_if_not_open(file: Union[PathABC, io.IOBase], *args,
**kwargs) -> io.IOBase:
"""
Takes an input that can either be a file or a path. If the input is given as
a file, it is returned without modification. If it is a path, it is opened
as a file.
Arguments:
file: a path or file that is being opened if it already isn't
    *args, **kwargs: for the potential additional arguments or keywords
                     that can be passed into this function, consult the
                     open builtin function.
Returns:
opened file
Raises:
IOError: consult the open builtin function for the potential IO errors
that can be raised when opening the file
"""
if isinstance(file, io.IOBase):
return file
return open(file, *args, **kwargs)
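# Example usage (illustrative): callers may pass either a path or an
# already-open file object.
#   with open_if_not_open('cache/cfg/yolov3.cfg') as cfg:  # opened here
#     ...
#   buf = io.StringIO("[net]\n")
#   assert open_if_not_open(buf) is buf  # returned unchanged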
# URL names that can be accessed using the download function
urls = {
'yolov1.cfg':
('https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov1.cfg',
'cfg',
'8b4b951dd646478ea4214cb389d152793ca98e5c6e67266884908ba084b6211e'),
'yolov2.cfg':
('https://raw.githubusercontent.com/AlexeyAB/darknet/master/cfg/yolov2.cfg',
'cfg',
'57d85d77262c840b56ad5418faae4950d9c7727e0192fb70618eeaac26a19817'),
'yolov3.cfg':
('https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov3.cfg',
'cfg',
'22489ea38575dfa36c67a90048e8759576416a79d32dc11e15d2217777b9a953'),
'yolov3-spp.cfg':
('https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov3-spp.cfg',
'cfg',
'7a4ec2d7427340fb12059f2b0ef76d6fcfcac132cc287cbbf0be5e3abaa856fd'),
'yolov3-tiny.cfg':
('https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov3-tiny.cfg',
'cfg',
'84eb7a675ef87c906019ff5a6e0effe275d175adb75100dcb47f0727917dc2c7'),
'yolov4.cfg':
('https://raw.githubusercontent.com/AlexeyAB/darknet/master/cfg/yolov4.cfg',
'cfg',
'a6d0f8e5c62cc8378384f75a8159b95fa2964d4162e33351b00ac82e0fc46a34'),
'yolov4-tiny.cfg':
('https://raw.githubusercontent.com/AlexeyAB/darknet/master/cfg/yolov4-tiny.cfg',
'cfg',
'6cbf5ece15235f66112e0bedebb324f37199b31aee385b7e18f0bbfb536b258e'),
'yolov1.weights':
('http://pjreddie.com/media/files/yolov1/yolov1.weights', 'weights',
'df414df832ed10e3f2788df1e0e0ae573976573b97b5eec2c824ab9e5a8ae6d6'),
'yolov2.weights':
('https://pjreddie.com/media/files/yolov2.weights', 'weights',
'd9945162ed6f54ce1a901e3ec537bdba4d572ecae7873087bd730e5a7942df3f'),
'yolov3.weights':
('https://pjreddie.com/media/files/yolov3.weights', 'weights',
'523e4e69e1d015393a1b0a441cef1d9c7659e3eb2d7e15f793f060a21b32f297'),
'yolov3-spp.weights':
('https://pjreddie.com/media/files/yolov3-spp.weights', 'weights',
'87a1e8c85c763316f34e428f2295e1db9ed4abcec59dd9544f8052f50de327b4'),
'yolov3-tiny.weights':
('https://pjreddie.com/media/files/yolov3-tiny.weights', 'weights',
'dccea06f59b781ec1234ddf8d1e94b9519a97f4245748a7d4db75d5b7080a42c'),
'yolov4.weights':
('https://github.com/AlexeyAB/darknet/releases/download/darknet_yolo_v3_optimal/yolov4.weights',
'weights',
'e8a4f6c62188738d86dc6898d82724ec0964d0eb9d2ae0f0a9d53d65d108d562'),
'yolov4-tiny.weights':
('https://github.com/AlexeyAB/darknet/releases/download/darknet_yolo_v4_pre/yolov4-tiny.weights',
'weights',
'037676f0d929c24e1bd9a0037fe30dc416fc26e0ca2a4491a44d024873316061'),
}
def download(name: str, trust: bool = False) -> str:
"""
Download a predefined file named `name` from the original repository.
  For example, yolov3.weights is the file that defines the pretrained YOLOv3
  model; it is fetched from https://pjreddie.com/media/files/yolov3.weights.
Args:
name: Name of the file that will be downloaded
      trust: Trust the cache even if the file's hash is inconsistent.
             This option can speed up the loading of the file at the
             expense of security. Default value is False.
Returns:
The path of the downloaded file as a `str`
Raises:
KeyError: Name of file is not found in the `urls` variable.
ValueError: Name or URL stored in the `urls` variable is invalid.
OSError: There was a problem saving the file when it was being
downloaded.
HTTPException: The file was not able to be downloaded.
Exception: Any other undocumented error that ks.utils.get_file may
have thrown to indicate that the file was inaccessible.
"""
import tensorflow.keras as ks
from http.client import HTTPException
url, type, hash = urls[name]
cache_dir = os.path.abspath('cache')
full_path = os.path.join(cache_dir, type, name)
if trust and os.path.exists(full_path):
return full_path
try:
if hash is None:
return ks.utils.get_file(name,
url,
cache_dir=cache_dir,
cache_subdir=type)
else:
return ks.utils.get_file(name,
url,
cache_dir=cache_dir,
cache_subdir=type,
file_hash=hash,
hash_algorithm='sha256')
except Exception as e:
if 'URL fetch failure on' in str(e):
raise HTTPException(str(e)) from e
else:
raise
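# Example usage (illustrative): downloads land under ./cache/<type>/ and
# are verified against the sha256 hashes in `urls`.
#   cfg_path = download('yolov3-tiny.cfg')
#   weights_path = download('yolov3-tiny.weights')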
from absl.testing import parameterized
import tensorflow as tf
import numpy as np
try:
from importlib import resources as importlib_resources
except BaseException:
# Shim for Python 3.6 and older
import importlib_resources
from official.vision.beta.projects.yolo.modeling.backbones.darknet import Darknet
from yolo.utils._darknet2tf import DarkNetConverter
from yolo.utils._darknet2tf.load_weights2 import load_weights_backbone
class darknet2tf_test(tf.test.TestCase, parameterized.TestCase):
def test_load_yolov3_weights(self):
x = tf.ones(shape=[1, 224, 224, 3], dtype=tf.float32)
model = Darknet(model_id='darknettiny')
encoder = DarkNetConverter.read('cache/cfg/yolov3-tiny.cfg', 'cache/weights/yolov3-tiny.weights')
    encode = encoder[:12]  # unused; leftover from debugging
load_weights_backbone(model, encoder)
y: tf.Tensor = model(x)
if __name__ == "__main__":
tf.test.main()