"git@developer.sourcefind.cn:OpenDAS/ollama.git" did not exist on "4b34930a31ceb9cc10d95b8bcd60c319f47d8043"
Commit 9ee02b45 authored by anivegesana

Remove bare returns

parent d909d4fc
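For context: a bare `return` at the end of a Python function body is redundant, because a function that falls off the end returns `None` anyway. This commit strips those trailing statements from `__init__`, `build`, and test methods; `call` methods keep their `return` since they return values. A minimal before/after sketch (the `ExampleLayer` name is hypothetical, not from this repo):

import tensorflow.keras as ks

class ExampleLayer(ks.layers.Layer):
  # Hypothetical layer illustrating the cleanup pattern in this commit.
  def __init__(self, filters, **kwargs):
    self._filters = filters
    super().__init__(**kwargs)
    # A bare `return` used to sit here; it is a no-op and was removed.

  def build(self, input_shape):
    self._conv = ks.layers.Conv2D(self._filters, 3, padding='same')
    super().build(input_shape)
    # Likewise, the trailing `return` here added nothing.

  def call(self, inputs):
    return self._conv(inputs)  # kept: this `return` carries a value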
@@ -27,6 +27,3 @@ from official.vision.beta.projects.yolo.configs.darknet_classification import im
 from official.vision.beta.projects.yolo.configs.darknet_classification import ImageClassificationTask
 from official.vision.beta.projects.yolo.tasks.image_classification import ImageClassificationTask
-# task_factory.register_task_cls(ImageClassificationTask)(ImageClassificationTask)
-# print(task_factory._REGISTERED_TASK_CLS)
\ No newline at end of file
@@ -378,7 +378,6 @@ class Darknet(ks.Model):
 "use_sync_bn": self._use_sync_bn,
 "activation": self._activation
 }
-#layer_config.update(super().get_config())
 return layer_config
 @factory.register_backbone_builder('darknet')
...
@@ -36,7 +36,6 @@ class CSPConnect(ks.layers.Layer):
 self._use_sync_bn = use_sync_bn
 self._norm_moment = norm_momentum
 self._norm_epsilon = norm_epsilon
-return
 def build(self, input_shape):
 self._conv1 = DarkConv(filters=self._filters // self._filter_reduce,
@@ -64,7 +63,6 @@ class CSPConnect(ks.layers.Layer):
 norm_momentum=self._norm_moment,
 norm_epsilon=self._norm_epsilon,
 activation=self._activation)
-return
 def call(self, inputs):
 x_prev, x_csp = inputs
...
@@ -36,7 +36,6 @@ class CSPDownSample(ks.layers.Layer):
 self._use_sync_bn = use_sync_bn
 self._norm_moment = norm_momentum
 self._norm_epsilon = norm_epsilon
-return
 def build(self, input_shape):
 self._conv1 = DarkConv(filters=self._filters,
@@ -76,7 +75,6 @@ class CSPDownSample(ks.layers.Layer):
 norm_momentum=self._norm_moment,
 norm_epsilon=self._norm_epsilon,
 activation=self._activation)
-return
 def call(self, inputs):
 x = self._conv1(inputs)
...
@@ -48,7 +48,6 @@ class CSPTiny(ks.layers.Layer):
 self._leaky_alpha = leaky_alpha
 super().__init__(**kwargs)
-return
 def build(self, input_shape):
 self._convlayer1 = DarkConv(filters=self._filters,
@@ -121,7 +120,6 @@ class CSPTiny(ks.layers.Layer):
 data_format=None)
 super().build(input_shape)
-return
 def call(self, inputs):
 x1 = self._convlayer1(inputs)
...
@@ -92,7 +92,6 @@ class DarkConv(ks.layers.Layer):
 self._leaky_alpha = leaky_alpha
 super(DarkConv, self).__init__(**kwargs)
-return
 def build(self, input_shape):
 kernel_size = self._kernel_size if type(
@@ -136,7 +135,6 @@ class DarkConv(ks.layers.Layer):
 self._activation_fn = mish()
 else:
 self._activation_fn = ks.layers.Activation(activation=self._activation)
-return
 def call(self, inputs):
 x = self._zeropad(inputs)
...
@@ -71,7 +71,6 @@ class DarkResidual(ks.layers.Layer):
 self._sc_activation = sc_activation
 super().__init__(**kwargs)
-return
 def build(self, input_shape):
 if self._downsample:
@@ -128,7 +127,6 @@ class DarkResidual(ks.layers.Layer):
 self._activation_fn = ks.layers.Activation(activation=self._sc_activation)
 super().build(input_shape)
-return
 def call(self, inputs):
 shortcut = self._dconv(inputs)
...
@@ -46,7 +46,6 @@ class DarkTiny(ks.layers.Layer):
 self._sc_activation = sc_activation
 super().__init__(**kwargs)
-return
 def build(self, input_shape):
 self._maxpool = tf.keras.layers.MaxPool2D(pool_size=2,
@@ -71,7 +70,6 @@ class DarkTiny(ks.layers.Layer):
 leaky_alpha=self._leaky_alpha)
 super().build(input_shape)
-return
 def call(self, inputs):
 output = self._maxpool(inputs)
...
@@ -7,8 +7,6 @@ import tensorflow.keras as ks
 class Identity(ks.layers.Layer):
 def __init__(self, **kwargs):
 super().__init__(**kwargs)
-return
 def call(self, input):
 return input
@@ -4,7 +4,6 @@ import tensorflow.keras as ks
 class mish(ks.layers.Layer):
 def __init__(self, **kwargs):
 super().__init__(**kwargs)
-return
 def call(self, x):
 return x * tf.math.tanh(ks.activations.softplus(x))
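For reference, the `mish` layer above computes the Mish activation; written out (this restates the `call` body above, nothing new):

mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + e^x))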
@@ -23,7 +23,6 @@ class CSPConnect(tf.test.TestCase, parameterized.TestCase):
 outx.shape.as_list(),
 [None, np.ceil(width // 2),
 np.ceil(height // 2), (filters)])
-return
 @parameterized.named_parameters(("same", 224, 224, 64, 1),
 ("downsample", 224, 224, 128, 2))
@@ -49,7 +48,6 @@ class CSPConnect(tf.test.TestCase, parameterized.TestCase):
 optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))
 self.assertNotIn(None, grad)
-return
 if __name__ == "__main__":
...
@@ -21,7 +21,6 @@ class CSPDownSample(tf.test.TestCase, parameterized.TestCase):
 outx.shape.as_list(),
 [None, np.ceil(width // 2),
 np.ceil(height // 2), (filters / mod)])
-return
 @parameterized.named_parameters(("same", 224, 224, 64, 1),
 ("downsample", 224, 224, 128, 2))
@@ -47,7 +46,6 @@ class CSPDownSample(tf.test.TestCase, parameterized.TestCase):
 optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))
 self.assertNotIn(None, grad)
-return
 if __name__ == "__main__":
...
@@ -31,7 +31,6 @@ class DarkConvTest(tf.test.TestCase, parameterized.TestCase):
 ]
 print(test)
 self.assertAllEqual(outx.shape.as_list(), test)
-return
 @parameterized.named_parameters(("filters", 3))
 def test_gradient_pass_though(self, filters):
@@ -52,7 +51,6 @@ class DarkConvTest(tf.test.TestCase, parameterized.TestCase):
 grad = tape.gradient(grad_loss, test_layer.trainable_variables)
 optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))
 self.assertNotIn(None, grad)
-return
 if __name__ == "__main__":
 tf.test.main()
@@ -24,7 +24,6 @@ class DarkResidualTest(tf.test.TestCase, parameterized.TestCase):
 outx.shape.as_list(),
 [None, np.ceil(width / mod),
 np.ceil(height / mod), filters])
-return
 @parameterized.named_parameters(("same", 64, 224, 224, False),
 ("downsample", 32, 223, 223, True),
@@ -54,7 +53,6 @@ class DarkResidualTest(tf.test.TestCase, parameterized.TestCase):
 optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))
 self.assertNotIn(None, grad)
-return
 if __name__ == "__main__":
...
@@ -20,7 +20,6 @@ class DarkTinyTest(tf.test.TestCase, parameterized.TestCase):
 self.assertEqual(height % strides, 0, msg="height % strides != 0")
 self.assertAllEqual(outx.shape.as_list(),
 [None, width // strides, height // strides, filters])
-return
 @parameterized.named_parameters(("middle", 224, 224, 64, 2),
 ("last", 224, 224, 1024, 1))
@@ -43,7 +42,6 @@ class DarkTinyTest(tf.test.TestCase, parameterized.TestCase):
 optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))
 self.assertNotIn(None, grad)
-return
 if __name__ == "__main__":
...
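The test hunks above all share one gradient-pass-through pattern: run a forward pass under `tf.GradientTape`, take gradients of a loss with respect to the layer's trainable variables, apply them, and assert no gradient is `None`. A self-contained sketch of that pattern (the helper name, loss, and input shape are placeholders, not the repo's code):

import tensorflow as tf
import tensorflow.keras as ks

def gradient_passes_through(layer, input_shape=(1, 224, 224, 3)):
  # Forward pass under a tape so gradients can be taken.
  x = tf.random.uniform(input_shape)
  optimizer = ks.optimizers.SGD(learning_rate=0.1)
  with tf.GradientTape() as tape:
    y = layer(x)
    loss = tf.reduce_mean(tf.square(y))  # stand-in loss
  grad = tape.gradient(loss, layer.trainable_variables)
  optimizer.apply_gradients(zip(grad, layer.trainable_variables))
  # Mirrors self.assertNotIn(None, grad) in the tests above.
  return all(g is not None for g in grad)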