"docs/en/git@developer.sourcefind.cn:wangsen/mineru.git" did not exist on "2447092d00e21dd62ccc1c6ee0c768db41ea1344"
Commit 9ee02b45 authored by anivegesana

Remove bare returns

parent d909d4fc
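
The diff below strips trailing bare `return` statements from `__init__`, `build`, and test methods; a function that simply falls off the end already returns `None`, so these lines are dead code. A minimal sketch of the pattern being cleaned up (the `ToyDarkLayer` layer is a hypothetical stand-in, not code from this repository):

```python
import tensorflow.keras as ks


class ToyDarkLayer(ks.layers.Layer):
  """Hypothetical layer showing the cleanup applied throughout this commit."""

  def __init__(self, filters=32, **kwargs):
    super().__init__(**kwargs)
    self._filters = filters
    # A bare `return` here (as in the old code) is redundant: Python
    # returns None implicitly when a function body ends.

  def build(self, input_shape):
    self._conv = ks.layers.Conv2D(self._filters, 3, padding="same")
    super().build(input_shape)
    # Same here: no trailing `return` needed.

  def call(self, inputs):
    # `return` is kept only where a value is actually produced.
    return self._conv(inputs)
```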
@@ -27,6 +27,3 @@ from official.vision.beta.projects.yolo.configs.darknet_classification import im
 from official.vision.beta.projects.yolo.configs.darknet_classification import ImageClassificationTask
 from official.vision.beta.projects.yolo.tasks.image_classification import ImageClassificationTask
-# task_factory.register_task_cls(ImageClassificationTask)(ImageClassificationTask)
-# print(task_factory._REGISTERED_TASK_CLS)
\ No newline at end of file
@@ -378,7 +378,6 @@ class Darknet(ks.Model):
         "use_sync_bn": self._use_sync_bn,
         "activation": self._activation
     }
-    #layer_config.update(super().get_config())
     return layer_config

 @factory.register_backbone_builder('darknet')
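
Note the contrast in the `get_config` hunk above: the bare returns elsewhere are dropped, but `return layer_config` stays, because Keras calls `get_config()` to serialize and re-create a layer. A rough, self-contained illustration (the `Scale` layer is hypothetical):

```python
import tensorflow.keras as ks


class Scale(ks.layers.Layer):
  """Hypothetical layer: get_config returns the kwargs needed to rebuild it."""

  def __init__(self, factor=2.0, **kwargs):
    super().__init__(**kwargs)
    self._factor = factor

  def call(self, inputs):
    return inputs * self._factor

  def get_config(self):
    config = super().get_config()
    config.update({"factor": self._factor})
    return config  # a meaningful return, unlike the bare ones removed


layer = Scale(3.0)
clone = Scale.from_config(layer.get_config())  # round-trips the layer config
```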
@@ -36,7 +36,6 @@ class CSPConnect(ks.layers.Layer):
     self._use_sync_bn = use_sync_bn
     self._norm_moment = norm_momentum
     self._norm_epsilon = norm_epsilon
-    return

   def build(self, input_shape):
     self._conv1 = DarkConv(filters=self._filters // self._filter_reduce,
@@ -64,7 +63,6 @@ class CSPConnect(ks.layers.Layer):
                            norm_momentum=self._norm_moment,
                            norm_epsilon=self._norm_epsilon,
                            activation=self._activation)
-    return

   def call(self, inputs):
     x_prev, x_csp = inputs
@@ -36,7 +36,6 @@ class CSPDownSample(ks.layers.Layer):
     self._use_sync_bn = use_sync_bn
     self._norm_moment = norm_momentum
     self._norm_epsilon = norm_epsilon
-    return

   def build(self, input_shape):
     self._conv1 = DarkConv(filters=self._filters,
@@ -76,7 +75,6 @@ class CSPDownSample(ks.layers.Layer):
                            norm_momentum=self._norm_moment,
                            norm_epsilon=self._norm_epsilon,
                            activation=self._activation)
-    return

   def call(self, inputs):
     x = self._conv1(inputs)
@@ -48,7 +48,6 @@ class CSPTiny(ks.layers.Layer):
     self._leaky_alpha = leaky_alpha
     super().__init__(**kwargs)
-    return

   def build(self, input_shape):
     self._convlayer1 = DarkConv(filters=self._filters,
@@ -121,7 +120,6 @@ class CSPTiny(ks.layers.Layer):
                                 data_format=None)
     super().build(input_shape)
-    return

   def call(self, inputs):
     x1 = self._convlayer1(inputs)
@@ -92,7 +92,6 @@ class DarkConv(ks.layers.Layer):
     self._leaky_alpha = leaky_alpha
     super(DarkConv, self).__init__(**kwargs)
-    return

   def build(self, input_shape):
     kernel_size = self._kernel_size if type(
@@ -136,7 +135,6 @@ class DarkConv(ks.layers.Layer):
       self._activation_fn = mish()
     else:
       self._activation_fn = ks.layers.Activation(activation=self._activation)
-    return

   def call(self, inputs):
     x = self._zeropad(inputs)
@@ -71,7 +71,6 @@ class DarkResidual(ks.layers.Layer):
     self._sc_activation = sc_activation
     super().__init__(**kwargs)
-    return

   def build(self, input_shape):
     if self._downsample:
@@ -128,7 +127,6 @@ class DarkResidual(ks.layers.Layer):
       self._activation_fn = ks.layers.Activation(activation=self._sc_activation)
     super().build(input_shape)
-    return

   def call(self, inputs):
     shortcut = self._dconv(inputs)
@@ -46,7 +46,6 @@ class DarkTiny(ks.layers.Layer):
     self._sc_activation = sc_activation
     super().__init__(**kwargs)
-    return

   def build(self, input_shape):
     self._maxpool = tf.keras.layers.MaxPool2D(pool_size=2,
@@ -71,7 +70,6 @@ class DarkTiny(ks.layers.Layer):
                                leaky_alpha=self._leaky_alpha)
     super().build(input_shape)
-    return

   def call(self, inputs):
     output = self._maxpool(inputs)
@@ -7,8 +7,6 @@ import tensorflow.keras as ks

 class Identity(ks.layers.Layer):
   def __init__(self, **kwargs):
     super().__init__(**kwargs)
-    return
-
   def call(self, input):
     return input
@@ -4,7 +4,6 @@ import tensorflow.keras as ks
 class mish(ks.layers.Layer):
   def __init__(self, **kwargs):
     super().__init__(**kwargs)
-    return

   def call(self, x):
     return x * tf.math.tanh(ks.activations.softplus(x))
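
As context for the hunk above: `mish` implements the Mish activation, mish(x) = x * tanh(softplus(x)). A quick standalone check of that expression (illustrative snippet, not part of the commit):

```python
import tensorflow as tf
import tensorflow.keras as ks

x = tf.constant([-1.0, 0.0, 1.0])
y = x * tf.math.tanh(ks.activations.softplus(x))  # same expression as mish.call
print(y.numpy())  # approximately [-0.3034, 0.0, 0.8651]
```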
@@ -23,7 +23,6 @@ class CSPConnect(tf.test.TestCase, parameterized.TestCase):
         outx.shape.as_list(),
         [None, np.ceil(width // 2),
          np.ceil(height // 2), (filters)])
-    return

   @parameterized.named_parameters(("same", 224, 224, 64, 1),
                                   ("downsample", 224, 224, 128, 2))
@@ -49,7 +48,6 @@ class CSPConnect(tf.test.TestCase, parameterized.TestCase):
     optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))
     self.assertNotIn(None, grad)
-    return

 if __name__ == "__main__":
@@ -21,7 +21,6 @@ class CSPDownSample(tf.test.TestCase, parameterized.TestCase):
         outx.shape.as_list(),
         [None, np.ceil(width // 2),
          np.ceil(height // 2), (filters / mod)])
-    return

   @parameterized.named_parameters(("same", 224, 224, 64, 1),
                                   ("downsample", 224, 224, 128, 2))
@@ -47,7 +46,6 @@ class CSPDownSample(tf.test.TestCase, parameterized.TestCase):
     optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))
     self.assertNotIn(None, grad)
-    return

 if __name__ == "__main__":
@@ -31,7 +31,6 @@ class DarkConvTest(tf.test.TestCase, parameterized.TestCase):
     ]
     print(test)
     self.assertAllEqual(outx.shape.as_list(), test)
-    return

   @parameterized.named_parameters(("filters", 3))
   def test_gradient_pass_though(self, filters):
@@ -52,7 +51,6 @@ class DarkConvTest(tf.test.TestCase, parameterized.TestCase):
     grad = tape.gradient(grad_loss, test_layer.trainable_variables)
     optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))
     self.assertNotIn(None, grad)
-    return

 if __name__ == "__main__":
   tf.test.main()
@@ -24,7 +24,6 @@ class DarkResidualTest(tf.test.TestCase, parameterized.TestCase):
         outx.shape.as_list(),
         [None, np.ceil(width / mod),
          np.ceil(height / mod), filters])
-    return

   @parameterized.named_parameters(("same", 64, 224, 224, False),
                                   ("downsample", 32, 223, 223, True),
@@ -54,7 +53,6 @@ class DarkResidualTest(tf.test.TestCase, parameterized.TestCase):
     optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))
     self.assertNotIn(None, grad)
-    return

 if __name__ == "__main__":
@@ -20,7 +20,6 @@ class DarkTinyTest(tf.test.TestCase, parameterized.TestCase):
     self.assertEqual(height % strides, 0, msg="height % strides != 0")
     self.assertAllEqual(outx.shape.as_list(),
                         [None, width // strides, height // strides, filters])
-    return

   @parameterized.named_parameters(("middle", 224, 224, 64, 2),
                                   ("last", 224, 224, 1024, 1))
@@ -43,7 +42,6 @@ class DarkTinyTest(tf.test.TestCase, parameterized.TestCase):
     optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))
     self.assertNotIn(None, grad)
-    return

 if __name__ == "__main__":
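
All of the test hunks above follow the same gradient-pass-through pattern: run the layer under a `tf.GradientTape`, differentiate a loss against `trainable_variables`, and assert that no gradient is `None`. A minimal sketch of that pattern (the helper name and loss are placeholder assumptions, not the project's actual test code):

```python
import tensorflow as tf


def gradient_pass_through_check(layer, input_shape=(1, 32, 32, 3)):
  """Assert that gradients reach every trainable variable of `layer`."""
  x = tf.random.uniform(input_shape)
  with tf.GradientTape() as tape:
    y = layer(x)
    loss = tf.reduce_mean(tf.square(y))  # placeholder loss
  grads = tape.gradient(loss, layer.trainable_variables)
  assert all(g is not None for g in grads), "a variable received no gradient"


gradient_pass_through_check(tf.keras.layers.Conv2D(8, 3, padding="same"))
```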