Commit da0d0d27 authored by anivegesana

Fix typos

parent 868cea8f
@@ -33,20 +33,20 @@ class DarkConv(ks.layers.Layer):
     strides: integer of tuple how much to move the kernel after each kernel use
     padding: string 'valid' or 'same', if same, then pad the image, else do not
     dialtion_rate: tuple to indicate how much to modulate kernel weights and
-                   the how many pixels ina featur map to skip
-    use_bias: boolean to indicate wither to use bias in convolution layer
-    kernel_initializer: string to indicate which function to use to initialize weigths
+                   how many pixels in a feature map to skip
+    use_bias: boolean to indicate whether to use bias in convolution layer
+    kernel_initializer: string to indicate which function to use to initialize weights
     bias_initializer: string to indicate which function to use to initialize bias
     kernel_regularizer: string to indicate which function to use to regularizer weights
     bias_regularizer: string to indicate which function to use to regularizer bias
     group_id: integer for which group of features to pass through the conv.
     groups: integer for how many splits there should be in the convolution feature stack input
-    grouping_only: skip the convolution and only return the group of featuresindicated by grouping_only
-    use_bn: boolean for wether to use batchnormalization
-    use_sync_bn: boolean for wether sync batch normalization statistics
+    grouping_only: skip the convolution and only return the group of features indicated by grouping_only
+    use_bn: boolean for whether to use batch normalization
+    use_sync_bn: boolean for whether sync batch normalization statistics
     of all batch norm layers to the models global statistics (across all input batches)
-    norm_moment: float for moment to use for batchnorm
-    norm_epsilon: float for batchnorm epsilon
+    norm_moment: float for moment to use for batch normalization
+    norm_epsilon: float for batch normalization epsilon
     activation: string or None for activation function to use in layer,
                 if None activation is replaced by linear
     leaky_alpha: float to use as alpha if activation function is leaky
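For context, the arguments above configure a standard Darknet conv block: convolution, optional batch normalization, then activation. A minimal sketch of that pattern in plain Keras, assuming TF 2.x; the argument names mirror the docstring, not the repo's exact DarkConv signature:

```python
import tensorflow as tf

# Conv -> batch norm -> activation, as the docstring's arguments describe.
inputs = tf.keras.Input(shape=(416, 416, 3))
x = tf.keras.layers.Conv2D(
    filters=32,                # output depth / features to learn
    kernel_size=3,
    strides=1,                 # how far the kernel moves per use
    padding='same',            # 'same' pads the image, 'valid' does not
    dilation_rate=1,           # the docstring's (misspelled) dialtion_rate
    use_bias=False,            # redundant when batch norm follows (see below)
    kernel_initializer='glorot_uniform',
    kernel_regularizer=tf.keras.regularizers.l2(5e-4))(inputs)
x = tf.keras.layers.BatchNormalization(momentum=0.99,    # norm_moment
                                       epsilon=1e-3)(x)  # norm_epsilon
x = tf.keras.layers.LeakyReLU(alpha=0.1)(x)              # leaky_alpha
model = tf.keras.Model(inputs, x)
```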
@@ -91,7 +91,7 @@ class DarkConv(ks.layers.Layer):
     self._kernel_regularizer = kernel_regularizer
     self._bias_regularizer = bias_regularizer
 
-    # batchnorm params
+    # batch normalization params
     self._use_bn = use_bn
     if self._use_bn:
       self._use_bias = False
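The `if self._use_bn: self._use_bias = False` above is worth a note: batch norm subtracts the per-channel batch mean, so a constant conv bias cancels out and BN's own beta offset takes over that role. A quick illustrative check, not repo code:

```python
import tensorflow as tf

# In training mode BN normalizes with batch statistics, so shifting every
# channel by a constant bias leaves the normalized output unchanged.
x = tf.random.normal([8, 16, 16, 4])
conv = tf.keras.layers.Conv2D(4, 3, padding='same', use_bias=True)
bn = tf.keras.layers.BatchNormalization()

y_before = bn(conv(x), training=True)
conv.bias.assign(conv.bias + 5.0)      # shift every channel by a constant
y_after = bn(conv(x), training=True)   # BN removes the shift again

print(float(tf.reduce_max(tf.abs(y_before - y_after))))  # ~0 (float error)
```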
@@ -137,7 +137,6 @@ class DarkConv(ks.layers.Layer):
         kernel_regularizer=self._kernel_regularizer,
         bias_regularizer=self._bias_regularizer)
 
-    #self.conv =tf.nn.convolution(filters=self._filters, strides=self._strides, padding=self._padding
     if self._use_bn:
       if self._use_sync_bn:
         self.bn = tf.keras.layers.experimental.SyncBatchNormalization(
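On the branch above: synchronized batch norm reduces normalization statistics across all replicas, which matters when per-replica batches are small; the plain layer normalizes per replica. A sketch of the same selection outside the class, with names taken from the docstring:

```python
import tensorflow as tf

use_sync_bn = True
norm_moment, norm_epsilon = 0.99, 1e-3

if use_sync_bn:
  # Available as tf.keras.layers.experimental.SyncBatchNormalization in
  # TF 2.2+; newer releases expose BatchNormalization(synchronized=True).
  bn = tf.keras.layers.experimental.SyncBatchNormalization(
      momentum=norm_moment, epsilon=norm_epsilon)
else:
  bn = tf.keras.layers.BatchNormalization(
      momentum=norm_moment, epsilon=norm_epsilon)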
@@ -207,18 +206,18 @@ class DarkTiny(ks.layers.Layer):
   Args:
     filters: integer for output depth, or the number of features to learn
-    use_bias: boolean to indicate wither to use bias in convolution layer
-    kernel_initializer: string to indicate which function to use to initialize weigths
+    use_bias: boolean to indicate whether to use bias in convolution layer
+    kernel_initializer: string to indicate which function to use to initialize weights
     bias_initializer: string to indicate which function to use to initialize bias
-    kernel_regularizer: string to indicate which function to use to regularizer weights
-    bias_regularizer: string to indicate which function to use to regularizer bias
-    use_bn: boolean for wether to use batchnormalization
-    use_sync_bn: boolean for wether sync batch normalization statistics
-    of all batch norm layers to the models global statistics (across all input batches)
+    kernel_regularizer: string to indicate which function to use to regularize weights
+    bias_regularizer: string to indicate which function to use to regularize bias
+    use_bn: boolean for whether to use batch normalization
+    use_sync_bn: boolean for whether to sync batch normalization statistics
+    of all batch norm layers to the models' global statistics (across all input batches)
     group_id: integer for which group of features to pass through the csp tiny stack.
     groups: integer for how many splits there should be in the convolution feature stack output
-    norm_moment: float for moment to use for batchnorm
-    norm_epsilon: float for batchnorm epsilon
+    norm_moment: float for moment to use for batch normalization
+    norm_epsilon: float for batch normalization epsilon
     activation: string or None for activation function to use in layer,
                 if None activation is replaced by linear
     **kwargs: Keyword Arguments
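The `groups`/`group_id` pair above describes simple channel routing: split the feature stack into `groups` equal slices along the channel axis and operate on the slice at index `group_id`. A sketch under that reading, with names taken from the docstring rather than the implementation:

```python
import tensorflow as tf

# Keep one channel group out of `groups` equal splits of the feature stack.
def take_group(x, groups=2, group_id=1):
  return tf.split(x, num_or_size_splits=groups, axis=-1)[group_id]

features = tf.random.normal([1, 32, 32, 64])
print(take_group(features).shape)  # (1, 32, 32, 32): half of the channels
```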
@@ -314,22 +313,22 @@ class DarkResidual(ks.layers.Layer):
   Args:
     filters: integer for output depth, or the number of features to learn
-    use_bias: boolean to indicate wither to use bias in convolution layer
-    kernel_initializer: string to indicate which function to use to initialize weigths
+    use_bias: boolean to indicate whether to use bias in convolution layer
+    kernel_initializer: string to indicate which function to use to initialize weights
     bias_initializer: string to indicate which function to use to initialize bias
     kernel_regularizer: string to indicate which function to use to regularizer weights
     bias_regularizer: string to indicate which function to use to regularizer bias
-    use_bn: boolean for wether to use batchnormalization
-    use_sync_bn: boolean for wether sync batch normalization statistics
+    use_bn: boolean for whether to use batch normalization
+    use_sync_bn: boolean for whether sync batch normalization statistics
     of all batch norm layers to the models global statistics (across all input batches)
-    norm_moment: float for moment to use for batchnorm
-    norm_epsilon: float for batchnorm epsilon
+    norm_moment: float for moment to use for batch normalization
+    norm_epsilon: float for batch normalization epsilon
     conv_activation: string or None for activation function to use in layer,
                      if None activation is replaced by linear
     leaky_alpha: float to use as alpha if activation function is leaky
     sc_activation: string for activation function to use in layer
     downsample: boolean for if image input is larger than layer output, set downsample to True
-                so the dimentions are forced to match
+                so the dimensions are forced to match
     **kwargs: Keyword Arguments
   '''
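On the `downsample` flag: when the main path halves the spatial size, the shortcut must be reduced the same way or the residual add fails on shape. A hedged sketch of the pattern; the real DarkResidual builds its branches out of DarkConv (conv + batch norm + activation), so `residual` here is illustrative only:

```python
import tensorflow as tf

def residual(x, filters, downsample=False, leaky_alpha=0.1):
  shortcut = x  # assumes x already has `filters` channels if not downsampling
  if downsample:
    # Stride both paths so the final add sees matching dimensions.
    x = tf.keras.layers.Conv2D(filters, 3, strides=2, padding='same',
                               use_bias=False)(x)
    shortcut = tf.keras.layers.Conv2D(filters, 1, strides=2, padding='same',
                                      use_bias=False)(shortcut)
  y = tf.keras.layers.Conv2D(filters // 2, 1, use_bias=False)(x)
  y = tf.keras.layers.LeakyReLU(alpha=leaky_alpha)(y)   # conv_activation
  y = tf.keras.layers.Conv2D(filters, 3, padding='same', use_bias=False)(y)
  y = tf.keras.layers.LeakyReLU(alpha=leaky_alpha)(y)
  return tf.keras.layers.Add()([shortcut, y])           # sc_activation: linear
```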
@@ -472,24 +471,24 @@ class CSPTiny(ks.layers.Layer):
   Args:
     filters: integer for output depth, or the number of features to learn
-    use_bias: boolean to indicate wither to use bias in convolution layer
-    kernel_initializer: string to indicate which function to use to initialize weigths
+    use_bias: boolean to indicate whether to use bias in convolution layer
+    kernel_initializer: string to indicate which function to use to initialize weights
     bias_initializer: string to indicate which function to use to initialize bias
-    use_bn: boolean for wether to use batchnormalization
+    use_bn: boolean for whether to use batch normalization
     kernel_regularizer: string to indicate which function to use to regularizer weights
     bias_regularizer: string to indicate which function to use to regularizer bias
-    use_sync_bn: boolean for wether sync batch normalization statistics
+    use_sync_bn: boolean for whether sync batch normalization statistics
     of all batch norm layers to the models global statistics (across all input batches)
     group_id: integer for which group of features to pass through the csp tiny stack.
     groups: integer for how many splits there should be in the convolution feature stack output
-    norm_moment: float for moment to use for batchnorm
-    norm_epsilon: float for batchnorm epsilon
+    norm_moment: float for moment to use for batch normalization
+    norm_epsilon: float for batch normalization epsilon
     conv_activation: string or None for activation function to use in layer,
                      if None activation is replaced by linear
     leaky_alpha: float to use as alpha if activation function is leaky
     sc_activation: string for activation function to use in layer
     downsample: boolean for if image input is larger than layer output, set downsample to True
-                so the dimentions are forced to match
+                so the dimensions are forced to match
     **kwargs: Keyword Arguments
   """
   def __init__(
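The CSPTiny arguments describe a split-transform-merge stack in the yolov4-tiny style: convolve, keep one channel group, transform it with two convolutions, concatenate the partial results back, then optionally max-pool to downsample. An assumption-laden sketch of that routing, not the repo's exact wiring:

```python
import tensorflow as tf

def csp_tiny(x, filters, groups=2, group_id=1, downsample=True):
  x = tf.keras.layers.Conv2D(filters, 3, padding='same', use_bias=False)(x)
  g = tf.split(x, groups, axis=-1)[group_id]          # partial feature stack
  a = tf.keras.layers.Conv2D(filters // 2, 3, padding='same',
                             use_bias=False)(g)
  b = tf.keras.layers.Conv2D(filters // 2, 3, padding='same',
                             use_bias=False)(a)
  m = tf.keras.layers.Concatenate(axis=-1)([b, a])
  m = tf.keras.layers.Conv2D(filters, 1, use_bias=False)(m)
  x = tf.keras.layers.Concatenate(axis=-1)([x, m])    # cross-stage merge
  if downsample:
    x = tf.keras.layers.MaxPool2D(pool_size=2, strides=2)(x)
  return x
```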
@@ -665,11 +664,11 @@ class CSPDownSample(ks.layers.Layer):
     bias_initializer: string to indicate which function to use to initialize bias
     kernel_regularizer: string to indicate which function to use to regularizer weights
     bias_regularizer: string to indicate which function to use to regularizer bias
-    use_bn: boolean for wether to use batchnormalization
-    use_sync_bn: boolean for wether sync batch normalization statistics
+    use_bn: boolean for whether to use batch normalization
+    use_sync_bn: boolean for whether sync batch normalization statistics
     of all batch norm layers to the models global statistics (across all input batches)
-    norm_moment: float for moment to use for batchnorm
-    norm_epsilon: float for batchnorm epsilon
+    norm_moment: float for moment to use for batch normalization
+    norm_epsilon: float for batch normalization epsilon
     **kwargs: Keyword Arguments
   """
   def __init__(
@@ -767,11 +766,11 @@ class CSPConnect(ks.layers.Layer):
     bias_initializer: string to indicate which function to use to initialize bias
     kernel_regularizer: string to indicate which function to use to regularizer weights
     bias_regularizer: string to indicate which function to use to regularizer bias
-    use_bn: boolean for wether to use batchnormalization
-    use_sync_bn: boolean for wether sync batch normalization statistics
+    use_bn: boolean for whether to use batch normalization
+    use_sync_bn: boolean for whether sync batch normalization statistics
     of all batch norm layers to the models global statistics (across all input batches)
-    norm_moment: float for moment to use for batchnorm
-    norm_epsilon: float for batchnorm epsilon
+    norm_moment: float for moment to use for batch normalization
+    norm_epsilon: float for batch normalization epsilon
     **kwargs: Keyword Arguments
   """
   def __init__(
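CSPDownSample and CSPConnect are the two halves of one cross-stage-partial stage: the first downsamples and forks the stack into a dense path and a shortcut path, the residual blocks run on the dense path, and the second merges the paths back. A hypothetical sketch of that pairing; the filter splits and wiring are assumptions made only to show how the halves fit together:

```python
import tensorflow as tf

def csp_down_sample(x, filters):
  x = tf.keras.layers.Conv2D(filters, 3, strides=2, padding='same',
                             use_bias=False)(x)               # downsample
  dense = tf.keras.layers.Conv2D(filters // 2, 1, use_bias=False)(x)
  shortcut = tf.keras.layers.Conv2D(filters // 2, 1, use_bias=False)(x)
  return dense, shortcut

def csp_connect(dense, shortcut, filters):
  dense = tf.keras.layers.Conv2D(filters // 2, 1, use_bias=False)(dense)
  x = tf.keras.layers.Concatenate(axis=-1)([dense, shortcut])
  return tf.keras.layers.Conv2D(filters, 1, use_bias=False)(x)

dense, shortcut = csp_down_sample(tf.random.normal([1, 64, 64, 32]), 64)
out = csp_connect(dense, shortcut, 64)  # residual blocks would sit between
```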