Commit 54a70bac authored by Fan Yang, committed by A. Unique TensorFlower

Internal change

PiperOrigin-RevId: 481804149
parent cc0b9a7a
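Although the commit message only says "Internal change", the diff below is a small cleanup: the per-file copies of the ImageNet normalization constants are deleted, and call sites switch to the shared definitions in official.vision.ops.preprocess_ops. A minimal sketch of the resulting pattern, assuming preprocess_ops exposes MEAN_RGB and STDDEV_RGB with the same values as the removed module-level constants:

import tensorflow as tf
from official.vision.ops import preprocess_ops

# Shared constants defined once in preprocess_ops (values assumed to match the
# deleted per-file copies):
#   MEAN_RGB = (0.485 * 255, 0.456 * 255, 0.406 * 255)
#   STDDEV_RGB = (0.229 * 255, 0.224 * 255, 0.225 * 255)

image = tf.zeros([224, 224, 3], tf.float32)  # placeholder input for this sketch
image = preprocess_ops.normalize_image(
    image, offset=preprocess_ops.MEAN_RGB, scale=preprocess_ops.STDDEV_RGB)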
@@ -20,10 +20,6 @@ from official.projects.basnet.tasks import basnet
 from official.vision.serving import semantic_segmentation
 
-MEAN_RGB = (0.485 * 255, 0.456 * 255, 0.406 * 255)
-STDDEV_RGB = (0.229 * 255, 0.224 * 255, 0.225 * 255)
-
-
 class BASNetModule(semantic_segmentation.SegmentationModule):
   """BASNet Module."""
@@ -33,7 +33,7 @@ from official.vision.dataloaders import segmentation_input
 def build_basnet_model(
     input_specs: tf.keras.layers.InputSpec,
     model_config: exp_cfg.BASNetModel,
-    l2_regularizer: tf.keras.regularizers.Regularizer = None):
+    l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None):
   """Builds BASNet model."""
   norm_activation_config = model_config.norm_activation
   backbone = basnet_model.BASNetEncoder(
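The one change in this commit that is not about the shared constants is the l2_regularizer annotation above: the parameter already defaulted to None, and wrapping the type in Optional[...] makes that explicit for type checkers such as pytype. A minimal, hypothetical sketch of the same annotation (assuming the real module imports Optional from typing outside this hunk; build_model here is an illustrative name, not the repository function):

from typing import Optional

import tensorflow as tf


def build_model(
    l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None
) -> tf.keras.regularizers.Regularizer:
  """Hypothetical builder mirroring the annotated signature in the hunk above."""
  # None is explicitly permitted; fall back to a default L2 penalty if unset.
  return l2_regularizer or tf.keras.regularizers.L2(1e-4)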
@@ -23,9 +23,6 @@ from official.vision.dataloaders import parser
 from official.vision.ops import augment
 from official.vision.ops import preprocess_ops
 
-MEAN_RGB = (0.485 * 255, 0.456 * 255, 0.406 * 255)
-STDDEV_RGB = (0.229 * 255, 0.224 * 255, 0.225 * 255)
-
 DEFAULT_IMAGE_FIELD_KEY = 'image/encoded'
 DEFAULT_LABEL_FIELD_KEY = 'image/class/label'
@@ -223,7 +220,7 @@ class Parser(parser.Parser):
     # Normalizes image with mean and std pixel values.
     image = preprocess_ops.normalize_image(
-        image, offset=MEAN_RGB, scale=STDDEV_RGB)
+        image, offset=preprocess_ops.MEAN_RGB, scale=preprocess_ops.STDDEV_RGB)
 
     # Random erasing after the image has been normalized
     if self._random_erasing is not None:
@@ -258,7 +255,7 @@ class Parser(parser.Parser):
     # Normalizes image with mean and std pixel values.
     image = preprocess_ops.normalize_image(
-        image, offset=MEAN_RGB, scale=STDDEV_RGB)
+        image, offset=preprocess_ops.MEAN_RGB, scale=preprocess_ops.STDDEV_RGB)
 
     # Convert image to self._dtype.
     image = tf.image.convert_image_dtype(image, self._dtype)
@@ -284,6 +281,6 @@ class Parser(parser.Parser):
     # Normalizes image with mean and std pixel values.
     image = preprocess_ops.normalize_image(
-        image, offset=MEAN_RGB, scale=STDDEV_RGB)
+        image, offset=preprocess_ops.MEAN_RGB, scale=preprocess_ops.STDDEV_RGB)
     image.set_shape(input_image_size + [num_channels])
     return image
@@ -26,9 +26,6 @@ from official.vision.dataloaders import decoder
 from official.vision.dataloaders import parser
 from official.vision.ops import preprocess_ops
 
-MEAN_RGB = (0.485 * 255, 0.456 * 255, 0.406 * 255)
-STDDEV_RGB = (0.229 * 255, 0.224 * 255, 0.225 * 255)
-
 
 class Decoder(decoder.Decoder):
   """A tf.Example decoder for classification task."""
@@ -102,7 +99,7 @@ class Parser(parser.Parser):
     # Normalizes image with mean and std pixel values.
     image = preprocess_ops.normalize_image(
-        image, offset=MEAN_RGB, scale=STDDEV_RGB)
+        image, offset=preprocess_ops.MEAN_RGB, scale=preprocess_ops.STDDEV_RGB)
     image = tf.image.convert_image_dtype(image, self._dtype)
     return image, label
@@ -27,10 +27,6 @@ from official.vision.ops import preprocess_ops
 from official.vision.serving import export_base
 
-MEAN_RGB = (0.485 * 255, 0.456 * 255, 0.406 * 255)
-STDDEV_RGB = (0.229 * 255, 0.224 * 255, 0.225 * 255)
-
-
 class DetectionModule(export_base.ExportModule):
   """Detection Module."""
@@ -74,9 +70,8 @@ class DetectionModule(export_base.ExportModule):
     """Builds detection model inputs for serving."""
     model_params = self.params.task.model
     # Normalizes image with mean and std pixel values.
-    image = preprocess_ops.normalize_image(image,
-                                           offset=MEAN_RGB,
-                                           scale=STDDEV_RGB)
+    image = preprocess_ops.normalize_image(
+        image, offset=preprocess_ops.MEAN_RGB, scale=preprocess_ops.STDDEV_RGB)
     image, image_info = preprocess_ops.resize_and_crop_image(
         image,
@@ -21,10 +21,6 @@ from official.vision.ops import preprocess_ops
 from official.vision.serving import export_base
 
-MEAN_RGB = (0.485 * 255, 0.456 * 255, 0.406 * 255)
-STDDEV_RGB = (0.229 * 255, 0.224 * 255, 0.225 * 255)
-
-
 class ClassificationModule(export_base.ExportModule):
   """classification Module."""
@@ -50,9 +46,8 @@ class ClassificationModule(export_base.ExportModule):
         image, [self._input_image_size[0], self._input_image_size[1], 3])
     # Normalizes image with mean and std pixel values.
-    image = preprocess_ops.normalize_image(image,
-                                           offset=MEAN_RGB,
-                                           scale=STDDEV_RGB)
+    image = preprocess_ops.normalize_image(
+        image, offset=preprocess_ops.MEAN_RGB, scale=preprocess_ops.STDDEV_RGB)
     return image
 
   def serve(self, images):
@@ -21,10 +21,6 @@ from official.vision.ops import preprocess_ops
 from official.vision.serving import export_base
 
-MEAN_RGB = (0.485 * 255, 0.456 * 255, 0.406 * 255)
-STDDEV_RGB = (0.229 * 255, 0.224 * 255, 0.225 * 255)
-
-
 class SegmentationModule(export_base.ExportModule):
   """Segmentation Module."""
@@ -41,9 +37,8 @@ class SegmentationModule(export_base.ExportModule):
     """Builds classification model inputs for serving."""
     # Normalizes image with mean and std pixel values.
-    image = preprocess_ops.normalize_image(image,
-                                           offset=MEAN_RGB,
-                                           scale=STDDEV_RGB)
+    image = preprocess_ops.normalize_image(
+        image, offset=preprocess_ops.MEAN_RGB, scale=preprocess_ops.STDDEV_RGB)
 
     if self.params.task.train_data.preserve_aspect_ratio:
       image, image_info = preprocess_ops.resize_and_crop_image(
@@ -21,9 +21,6 @@ from official.vision.dataloaders import video_input
 from official.vision.serving import export_base
 from official.vision.tasks import video_classification
 
-MEAN_RGB = (0.485 * 255, 0.456 * 255, 0.406 * 255)
-STDDEV_RGB = (0.229 * 255, 0.224 * 255, 0.225 * 255)
-
 
 class VideoClassificationModule(export_base.ExportModule):
   """Video classification Module."""