Commit 8ee48095 authored by vishnubanna

fixed module imports

parent a0dd0e8a
@@ -4,3 +4,4 @@ from .dark_tiny import DarkTiny
from .csp_connect import CSPConnect
from .csp_downsample import CSPDownSample
from .csp_tiny import CSPTiny
+from .identity import Identity
\ No newline at end of file
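The test diffs below switch from aliasing individual classes to importing the `building_blocks` package as a module. A minimal sketch of the pattern the updated tests adopt (paths taken from the diff; the constructor arguments here are illustrative only):

```python
# Module-level import, as used in the updated tests.
from official.vision.beta.projects.yolo.modeling import building_blocks as nn_blocks

# Blocks are referenced through the package, so a newly exported block
# (e.g. Identity above) becomes available without editing each test's imports.
downsample = nn_blocks.CSPDownSample(filters=64, filter_reduce=2)
connect = nn_blocks.CSPConnect(filters=64, filter_reduce=2)
```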
@@ -3,9 +3,7 @@ import tensorflow.keras as ks
import numpy as np
from absl.testing import parameterized
-from official.vision.beta.projects.yolo.modeling.building_blocks import CSPDownSample as layer
-from official.vision.beta.projects.yolo.modeling.building_blocks import CSPConnect as layer_companion
+from official.vision.beta.projects.yolo.modeling import building_blocks as nn_blocks
class CSPConnect(tf.test.TestCase, parameterized.TestCase):
@@ -13,8 +11,8 @@ class CSPConnect(tf.test.TestCase, parameterized.TestCase):
("downsample", 224, 224, 64, 2))
def test_pass_through(self, width, height, filters, mod):
x = ks.Input(shape=(width, height, filters))
-test_layer = layer(filters=filters, filter_reduce=mod)
-test_layer2 = layer_companion(filters=filters, filter_reduce=mod)
+test_layer = nn_blocks.CSPDownSample(filters=filters, filter_reduce=mod)
+test_layer2 = nn_blocks.CSPConnect(filters=filters, filter_reduce=mod)
outx, px = test_layer(x)
outx = test_layer2([outx, px])
print(outx)
@@ -29,8 +27,8 @@ class CSPConnect(tf.test.TestCase, parameterized.TestCase):
def test_gradient_pass_though(self, filters, width, height, mod):
loss = ks.losses.MeanSquaredError()
optimizer = ks.optimizers.SGD()
-test_layer = layer(filters, filter_reduce=mod)
-path_layer = layer_companion(filters, filter_reduce=mod)
+test_layer = nn_blocks.CSPDownSample(filters, filter_reduce=mod)
+path_layer = nn_blocks.CSPConnect(filters, filter_reduce=mod)
init = tf.random_normal_initializer()
x = tf.Variable(
......
@@ -3,9 +3,7 @@ import tensorflow.keras as ks
import numpy as np
from absl.testing import parameterized
-from official.vision.beta.projects.yolo.modeling.building_blocks import CSPDownSample as layer
-from official.vision.beta.projects.yolo.modeling.building_blocks import CSPConnect as layer_companion
+from official.vision.beta.projects.yolo.modeling import building_blocks as nn_blocks
class CSPDownSample(tf.test.TestCase, parameterized.TestCase):
@@ -13,7 +11,7 @@ class CSPDownSample(tf.test.TestCase, parameterized.TestCase):
("downsample", 224, 224, 64, 2))
def test_pass_through(self, width, height, filters, mod):
x = ks.Input(shape=(width, height, filters))
-test_layer = layer(filters=filters, filter_reduce=mod)
+test_layer = nn_blocks.CSPDownSample(filters=filters, filter_reduce=mod)
outx, px = test_layer(x)
print(outx)
print(outx.shape.as_list())
@@ -27,8 +25,8 @@ class CSPDownSample(tf.test.TestCase, parameterized.TestCase):
def test_gradient_pass_though(self, filters, width, height, mod):
loss = ks.losses.MeanSquaredError()
optimizer = ks.optimizers.SGD()
-test_layer = layer(filters, filter_reduce=mod)
-path_layer = layer_companion(filters, filter_reduce=mod)
+test_layer = nn_blocks.CSPDownSample(filters, filter_reduce=mod)
+path_layer = nn_blocks.CSPConnect(filters, filter_reduce=mod)
init = tf.random_normal_initializer()
x = tf.Variable(
......
@@ -2,8 +2,7 @@ import tensorflow as tf
import tensorflow.keras as ks
import tensorflow_datasets as tfds
from absl.testing import parameterized
-from official.vision.beta.projects.yolo.modeling.building_blocks import DarkConv
+from official.vision.beta.projects.yolo.modeling import building_blocks as nn_blocks
class DarkConvTest(tf.test.TestCase, parameterized.TestCase):
@@ -17,7 +16,7 @@ class DarkConvTest(tf.test.TestCase, parameterized.TestCase):
else:
pad_const = 0
x = ks.Input(shape=(224, 224, 3))
-test_layer = DarkConv(filters=64,
+test_layer = nn_blocks.DarkConv(filters=64,
kernel_size=kernel_size,
padding=padding,
strides=strides,
@@ -37,7 +36,7 @@ class DarkConvTest(tf.test.TestCase, parameterized.TestCase):
loss = ks.losses.MeanSquaredError()
optimizer = ks.optimizers.SGD()
with tf.device("/CPU:0"):
-test_layer = DarkConv(filters, kernel_size=(3, 3), padding="same")
+test_layer = nn_blocks.DarkConv(filters, kernel_size=(3, 3), padding="same")
init = tf.random_normal_initializer()
x = tf.Variable(initial_value=init(shape=(1, 224, 224,
......
@@ -3,8 +3,7 @@ import tensorflow.keras as ks
import numpy as np
from absl.testing import parameterized
-from official.vision.beta.projects.yolo.modeling.building_blocks import DarkResidual as layer
+from official.vision.beta.projects.yolo.modeling import building_blocks as nn_blocks
class DarkResidualTest(tf.test.TestCase, parameterized.TestCase):
@@ -16,7 +15,7 @@ class DarkResidualTest(tf.test.TestCase, parameterized.TestCase):
if downsample:
mod = 2
x = ks.Input(shape=(width, height, filters))
-test_layer = layer(filters=filters, downsample=downsample)
+test_layer = nn_blocks.DarkResidual(filters=filters, downsample=downsample)
outx = test_layer(x)
print(outx)
print(outx.shape.as_list())
@@ -31,7 +30,7 @@ class DarkResidualTest(tf.test.TestCase, parameterized.TestCase):
def test_gradient_pass_though(self, filters, width, height, downsample):
loss = ks.losses.MeanSquaredError()
optimizer = ks.optimizers.SGD()
-test_layer = layer(filters, downsample=downsample)
+test_layer = nn_blocks.DarkResidual(filters, downsample=downsample)
if downsample:
mod = 2
......
@@ -3,7 +3,7 @@ import tensorflow.keras as ks
import numpy as np
from absl.testing import parameterized
-from official.vision.beta.projects.yolo.modeling.building_blocks import DarkTiny
+from official.vision.beta.projects.yolo.modeling import building_blocks as nn_blocks
class DarkTinyTest(tf.test.TestCase, parameterized.TestCase):
@@ -12,7 +12,7 @@ class DarkTinyTest(tf.test.TestCase, parameterized.TestCase):
("last", 224, 224, 1024, 1))
def test_pass_through(self, width, height, filters, strides):
x = ks.Input(shape=(width, height, filters))
-test_layer = DarkTiny(filters=filters, strides=strides)
+test_layer = nn_blocks.DarkTiny(filters=filters, strides=strides)
outx = test_layer(x)
self.assertEqual(width % strides, 0, msg="width % strides != 0")
self.assertEqual(height % strides, 0, msg="height % strides != 0")
@@ -24,7 +24,7 @@ class DarkTinyTest(tf.test.TestCase, parameterized.TestCase):
def test_gradient_pass_though(self, width, height, filters, strides):
loss = ks.losses.MeanSquaredError()
optimizer = ks.optimizers.SGD()
-test_layer = DarkTiny(filters=filters, strides=strides)
+test_layer = nn_blocks.DarkTiny(filters=filters, strides=strides)
init = tf.random_normal_initializer()
x = tf.Variable(
......
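Each `test_gradient_pass_though` hunk above is truncated at the variable setup. The full pattern those tests share looks roughly like the sketch below (a rough reconstruction for context, not the verbatim elided lines; the block choice and shapes are illustrative):

```python
import tensorflow as tf
import tensorflow.keras as ks

from official.vision.beta.projects.yolo.modeling import building_blocks as nn_blocks


def gradient_pass_through(filters=64, width=224, height=224):
  """Builds a block, runs one optimizer step, and checks that gradients exist."""
  loss = ks.losses.MeanSquaredError()
  optimizer = ks.optimizers.SGD()
  test_layer = nn_blocks.DarkResidual(filters, downsample=False)

  init = tf.random_normal_initializer()
  x = tf.Variable(initial_value=init(shape=(1, width, height, filters), dtype=tf.float32))
  y = tf.Variable(initial_value=init(shape=(1, width, height, filters), dtype=tf.float32))

  with tf.GradientTape() as tape:
    x_hat = test_layer(x)
    grad_loss = loss(x_hat, y)
  grad = tape.gradient(grad_loss, test_layer.trainable_variables)
  optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))
  return all(g is not None for g in grad)
```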
runtime:
  all_reduce_alg: null
  batchnorm_spatial_persistent: false
  dataset_num_private_threads: null
  default_shard_dim: -1
  distribution_strategy: mirrored
  enable_xla: false
  gpu_thread_mode: null
  loss_scale: null
  mixed_precision_dtype: float32
  num_cores_per_replica: 1
  num_gpus: 0
  num_packs: 1
  per_gpu_thread_count: 0
  run_eagerly: false
  task_index: -1
  tpu: null
  worker_hosts: null
task:
  gradient_clip_norm: 0.0
  init_checkpoint: ''
  logging_dir: null
  losses:
    l2_weight_decay: 0.0005
    label_smoothing: 0.0
    one_hot: true
  model:
    add_head_batch_norm: false
    backbone:
      darknet:
        model_id: cspdarknettiny
      type: darknet
    dropout_rate: 0.0
    input_size: [224, 224, 3]
    norm_activation:
      activation: relu
      norm_epsilon: 0.001
      norm_momentum: 0.99
      use_sync_bn: false
    num_classes: 1001
  train_data:
    block_length: 1
    cache: false
    cycle_length: 10
    deterministic: null
    drop_remainder: true
    dtype: float16
    enable_tf_data_service: false
    global_batch_size: 128
    input_path: imagenet-2012-tfrecord/train*
    is_training: true
    sharding: true
    shuffle_buffer_size: 10000
    tf_data_service_address: null
    tf_data_service_job_name: null
    tfds_as_supervised: false
    tfds_data_dir: ''
    tfds_download: false
    tfds_name: ''
    tfds_skip_decoding_feature: ''
    tfds_split: ''
  validation_data:
    block_length: 1
    cache: false
    cycle_length: 10
    deterministic: null
    drop_remainder: false
    dtype: float16
    enable_tf_data_service: false
    global_batch_size: 128
    input_path: imagenet-2012-tfrecord/valid*
    is_training: true
    sharding: true
    shuffle_buffer_size: 10000
    tf_data_service_address: null
    tf_data_service_job_name: null
    tfds_as_supervised: false
    tfds_data_dir: ''
    tfds_download: false
    tfds_name: ''
    tfds_skip_decoding_feature: ''
    tfds_split: ''
trainer:
  allow_tpu_summary: false
  best_checkpoint_eval_metric: ''
  best_checkpoint_export_subdir: ''
  best_checkpoint_metric_comp: higher
  checkpoint_interval: 10000
  continuous_eval_timeout: 3600
  eval_tf_function: true
  max_to_keep: 5
  optimizer_config:
    ema: null
    learning_rate:
      polynomial:
        cycle: false
        decay_steps: 799000
        end_learning_rate: 0.0001
        initial_learning_rate: 0.1
        name: PolynomialDecay
        power: 4.0
      type: polynomial
    optimizer:
      sgd:
        clipnorm: null
        clipvalue: null
        decay: 0.0
        momentum: 0.9
        name: SGD
        nesterov: false
      type: sgd
    warmup:
      linear:
        name: linear
        warmup_learning_rate: 0
        warmup_steps: 1000
      type: linear
  steps_per_loop: 10000
  summary_interval: 10000
  train_steps: 800000
  train_tf_function: true
  train_tf_while_loop: true
  validation_interval: 10000
  validation_steps: 400
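For reference, the `trainer.optimizer_config` block above corresponds roughly to the following plain-Keras construction. The experiment itself builds these objects through the Model Garden optimization factories; the `LinearWarmup` wrapper below is only an illustrative stand-in for the configured linear warmup:

```python
import tensorflow as tf

# Polynomial decay as configured: 0.1 -> 1e-4 over 799,000 steps with power 4.0.
decay = tf.keras.optimizers.schedules.PolynomialDecay(
    initial_learning_rate=0.1,
    decay_steps=799000,
    end_learning_rate=0.0001,
    power=4.0,
    cycle=False)


class LinearWarmup(tf.keras.optimizers.schedules.LearningRateSchedule):
  """Illustrative linear ramp from warmup_learning_rate up to the decayed schedule."""

  def __init__(self, after_warmup, warmup_steps, warmup_lr=0.0):
    self._after = after_warmup
    self._warmup_steps = warmup_steps
    self._warmup_lr = warmup_lr

  def __call__(self, step):
    step = tf.cast(step, tf.float32)
    warmup_steps = tf.cast(self._warmup_steps, tf.float32)
    target = self._after(self._warmup_steps)
    warmup_lr = self._warmup_lr + (target - self._warmup_lr) * (step / warmup_steps)
    return tf.cond(step < warmup_steps,
                   lambda: warmup_lr,
                   lambda: self._after(step))

  def get_config(self):
    return {"warmup_steps": self._warmup_steps, "warmup_lr": self._warmup_lr}


# warmup_steps: 1000 with warmup_learning_rate: 0; SGD with momentum 0.9, no Nesterov.
learning_rate = LinearWarmup(decay, warmup_steps=1000, warmup_lr=0.0)
optimizer = tf.keras.optimizers.SGD(
    learning_rate=learning_rate, momentum=0.9, nesterov=False)
```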