"examples/instruct_pix2pix/train_instruct_pix2pix.py" did not exist on "e0d8c9ef838d0a7372a4807cd978e032bd26c572"
Commit 7c34eac7 authored by The-Indian-Chinna's avatar The-Indian-Chinna
Browse files

Dr.Davis Commented Pull Request Fixes

parent 0bfc786c
......@@ -15,7 +15,7 @@ This repository is the unofficial implementation of the following papers. Howeve
Yolo v1, the original implementation, was released in 2015, providing a ground-breaking algorithm that could quickly process images and locate objects in a single pass through the detector. The original implementation used a backbone derived from state-of-the-art object classifiers of the time, like [GoogLeNet](https://arxiv.org/abs/1409.4842) and [VGG](https://arxiv.org/abs/1409.1556). More attention was given to the novel Yolo detection head that allowed for object detection with a single pass of an image. Though limited, the network could predict up to 90 bounding boxes per image, and was tested on about 80 classes per box. Also, the model could only make predictions at one scale. These attributes made Yolo v1 more limited and less versatile, so as the years passed, the developers continued to update and develop this model.
Yolo v3 and v4 serve as the most up to date and capable versions of the Yolo network group. These model uses a custom backbone called Darknet53 that uses knowledge gained from the ResNet paper to improve its predictions. The new backbone also allows for objects to be detected at multiple scales. As for the new detection head, the model now predicts the bounding boxes using a set of anchor box priors (Anchor Boxes) as suggestions. The multiscale predictions in combination with the Anchor boxes allows for the network to make up to 1000 object predictions on a single image. Finally, the new loss function forces the network to make better prediction by using Intersection Over Union (IOU) to inform the models confidence rather than relying on the mean squared error for the entire output.
Yolo v3 and v4 serve as the most up-to-date and capable versions of the Yolo network group. These models use a custom backbone called Darknet53 that uses knowledge gained from the ResNet paper to improve its predictions. The new backbone also allows for objects to be detected at multiple scales. As for the new detection head, the model now predicts the bounding boxes using a set of anchor box priors (Anchor Boxes) as suggestions. The multiscale predictions in combination with the anchor boxes allow the network to make up to 1000 object predictions on a single image. Finally, the new loss function forces the network to make better predictions by using Intersection Over Union (IOU) to inform the model's confidence rather than relying on the mean squared error for the entire output.
## Authors
......@@ -33,7 +33,8 @@ Yolo v3 and v4 serve as the most up to date and capable versions of the Yolo net
## Our Goal
Our goal with this model conversion is to provide highly versatile implementations of the Backbone and Yolo Head. We have tried to build the model in such a way that the Yolo head could easily be connected to a new, more powerful backbone if a person chose to.
Our goal with this model conversion is to provide implementations of the Backbone and Yolo Head. We have built the model in such a way that the Yolo head could be connected to a new, more powerful backbone if a person chose to.
## Models in the library
......
"""Backbones configurations."""
# Import libraries
import dataclasses
from typing import Optional
from official.modeling import hyperparams
# from official.vision.beta.configs import backbones
from official.modeling import hyperparams
@dataclasses.dataclass
class DarkNet(hyperparams.Config):
  """Configuration for the DarkNet backbone.

  Attributes:
    model_id: Identifier selecting the backbone variant; defaults to
      "darknet53" (the only variant referenced in this config file).
  """
  model_id: str = "darknet53"
\ No newline at end of file
# # we could not get this to work
# @dataclasses.dataclass
# class Backbone(backbones.Backbone):
# darknet: DarkNet = DarkNet()
......@@ -50,4 +50,4 @@ trainer:
warmup:
type: 'linear'
linear:
warmup_steps: 64000 #lr rise from 0 to 0.1 over 1000 steps
warmup_steps: 64000 #learning rate rises linearly from 0 to 0.1 over these 64000 warmup steps
......@@ -250,7 +250,7 @@ class Darknet(ks.Model):
name=f"{config.layer}_{i}")
stack_outputs.append(x_pass)
if (config.is_output and
self._min_size == None): # or isinstance(config.output_name, str):
self._min_size == None):
endpoints[config.output_name] = x
elif self._min_size != None and config.output_name >= self._min_size and config.output_name <= self._max_size:
endpoints[config.output_name] = x
......
......@@ -14,7 +14,7 @@ class CSPConnect(ks.layers.Layer):
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
bias_regularizer=None,
weight_decay=None, # default find where is it is stated
weight_decay=None,
use_bn=True,
use_sync_bn=False,
norm_momentum=0.99,
......
......@@ -14,7 +14,7 @@ class CSPDownSample(ks.layers.Layer):
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
bias_regularizer=None,
weight_decay=None, # default find where is it is stated
weight_decay=None,
use_bn=True,
use_sync_bn=False,
norm_momentum=0.99,
......
......@@ -14,7 +14,7 @@ class CSPTiny(ks.layers.Layer):
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
bias_regularizer=None,
weight_decay=None, # default find where is it is stated
weight_decay=None,
use_bn=True,
use_sync_bn=False,
group_id=1,
......
......@@ -23,7 +23,7 @@ class DarkConv(ks.layers.Layer):
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
bias_regularizer=None,
weight_decay=None, # default find where is it is stated
weight_decay=None, # Specify the weight decay as the default will not work.
use_bn=True,
use_sync_bn=False,
norm_momentum=0.99,
......@@ -99,7 +99,7 @@ class DarkConv(ks.layers.Layer):
self._kernel_size) == int else self._kernel_size[0]
if self._padding == "same" and kernel_size != 1:
self._zeropad = ks.layers.ZeroPadding2D(
((1, 1), (1, 1))) # symetric padding
((1, 1), (1, 1))) # symmetric padding
else:
self._zeropad = Identity()
......@@ -107,7 +107,7 @@ class DarkConv(ks.layers.Layer):
filters=self._filters,
kernel_size=self._kernel_size,
strides=self._strides,
padding="valid", #self._padding,
padding="valid",
dilation_rate=self._dilation_rate,
use_bias=self._use_bias,
kernel_initializer=self._kernel_initializer,
......@@ -148,7 +148,7 @@ class DarkConv(ks.layers.Layer):
return x
def get_config(self):
# used to store/share parameters to reconsturct the model
# used to store/share parameters to reconstruct the model
layer_config = {
"filters": self._filters,
"kernel_size": self._kernel_size,
......
......@@ -138,7 +138,7 @@ class DarkResidual(ks.layers.Layer):
return self._activation_fn(x)
def get_config(self):
# used to store/share parameters to reconsturct the model
# used to store/share parameters to reconstruct the model
layer_config = {
"filters": self._filters,
"use_bias": self._use_bias,
......
......@@ -54,19 +54,5 @@ class DarkConvTest(tf.test.TestCase, parameterized.TestCase):
self.assertNotIn(None, grad)
return
# @parameterized.named_parameters(("filters", 3), ("filters", 20), ("filters", 512))
# def test_time(self, filters):
# # finish the test for time
# dataset = tfds.load("mnist")
# model = ks.Sequential([
# DarkConv(7, kernel_size=(3,3), strides = (2,2), activation='relu'),
# DarkConv(10, kernel_size=(3,3), strides = (2,2), activation='relu'),
# DarkConv(filters, kernel_size=(3,3), strides = (1,1), activation='relu'),
# DarkConv(9, kernel_size=(3,3), strides = (2,2), activation='relu'),
# ks.layers.GlobalAveragePooling2D(),
# ks.layers.Dense(10, activation='softmax')], name='test')
# return
if __name__ == "__main__":
tf.test.main()
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment