Unverified Commit 8b641b13 authored by Srihari Humbarwadi, committed by GitHub

Merge branch 'tensorflow:master' into panoptic-deeplab

parents 7cffacfe 357fa547
......@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
 """Configuration definitions for ResNet losses, learning rates, and optimizers."""
 from __future__ import absolute_import
 from __future__ import division
......
......@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
 """Configuration definitions for VGG losses, learning rates, and optimizers."""
 import dataclasses
......
......@@ -23,9 +23,11 @@ from official.core import task_factory
 from official.modeling import optimization
 from official.modeling.multitask import base_model
 from official.modeling.multitask import configs
+from official.modeling.privacy import configs as dp_configs

 OptimizationConfig = optimization.OptimizationConfig
 RuntimeConfig = config_definitions.RuntimeConfig
+DifferentialPrivacyConfig = dp_configs.DifferentialPrivacyConfig


 class MultiTask(tf.Module, metaclass=abc.ABCMeta):
......@@ -93,9 +95,11 @@ class MultiTask(tf.Module, metaclass=abc.ABCMeta):
   @classmethod
   def create_optimizer(cls,
                        optimizer_config: OptimizationConfig,
-                       runtime_config: Optional[RuntimeConfig] = None):
+                       runtime_config: Optional[RuntimeConfig] = None,
+                       dp_config: Optional[DifferentialPrivacyConfig] = None):
     return base_task.Task.create_optimizer(
-        optimizer_config=optimizer_config, runtime_config=runtime_config)
+        optimizer_config=optimizer_config, runtime_config=runtime_config,
+        dp_config=dp_config)

   def joint_train_step(self, task_inputs,
                        multi_task_model: base_model.MultiTaskBaseModel,
......
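The new `dp_config` argument lets a multi-task experiment hand a differential-privacy configuration through to the shared `base_task.Task.create_optimizer` factory. A minimal usage sketch, assuming a concrete `MyMultiTask` subclass (a hypothetical name) and the `DifferentialPrivacyConfig` introduced later in this commit:

from official.modeling import optimization
from official.modeling.privacy import configs as dp_configs

opt_config = optimization.OptimizationConfig({
    'optimizer': {'type': 'sgd', 'sgd': {'momentum': 0.9}},
    'learning_rate': {'type': 'constant', 'constant': {'learning_rate': 0.1}},
})
dp_config = dp_configs.DifferentialPrivacyConfig(
    {'clipping_norm': 1.0, 'noise_multiplier': 1.1})
# MyMultiTask stands in for any MultiTask subclass.
optimizer = MyMultiTask.create_optimizer(opt_config, dp_config=dp_config)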
......@@ -66,8 +66,7 @@ def run_experiment(
   is_training = 'train' in mode
   is_eval = 'eval' in mode
   with distribution_strategy.scope():
-    optimizer = task.create_optimizer(params.trainer.optimizer_config,
-                                      params.runtime)
+    optimizer = train_utils.create_optimizer(task, params)
     kwargs = dict(multi_task=task, multi_task_model=model, optimizer=optimizer)
     if params.trainer.trainer_type == 'interleaving':
       sampler = task_sampler.get_task_sampler(params.trainer.task_sampler,
......@@ -183,8 +182,7 @@ def run_experiment_with_multitask_eval(
         config=params,
         task=train_task,
         model=train_task.build_model(),
-        optimizer=train_task.create_optimizer(params.trainer.optimizer_config,
-                                              params.runtime),
+        optimizer=train_utils.create_optimizer(train_task, params),
         train=True,
         evaluate=False)
   else:
......
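Both call sites now route through `train_utils.create_optimizer(task, params)` instead of invoking the task's method directly, so optimizer-config, runtime-config, and DP-config wiring lives in one helper. The helper's body is not shown in this diff; a hedged sketch of what such a wrapper might look like (the `differential_privacy_config` attribute name is an assumption, not a verified field):

def create_optimizer(task, params):
  # Assumed shape of the helper: forward the trainer and runtime configs
  # and, when the experiment params carry one, the DP config as well.
  dp_config = getattr(params.task, 'differential_privacy_config', None)
  return task.create_optimizer(params.trainer.optimizer_config,
                               params.runtime, dp_config=dp_config)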
......@@ -28,9 +28,9 @@ from official.nlp import optimization as nlp_optimization
 OPTIMIZERS_CLS = {
     'sgd': tf.keras.optimizers.SGD,
-    # TODO(chenmoneygithub): experimental.SGD
+    'sgd_experimental': tf.keras.optimizers.experimental.SGD,
     'adam': tf.keras.optimizers.Adam,
-    # TODO(chenmoneygithub): experimental.Adam
+    'adam_experimental': tf.keras.optimizers.experimental.Adam,
     'adamw': nlp_optimization.AdamWeightDecay,
     'lamb': tfa_optimizers.LAMB,
     'rmsprop': tf.keras.optimizers.RMSprop,
......
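The two TODO placeholders are resolved by registering the experimental Keras optimizers under their own keys. A small lookup-and-construct sketch against the mapping above (hyperparameter values are illustrative):

optimizer_cls = OPTIMIZERS_CLS['sgd_experimental']
optimizer = optimizer_cls(learning_rate=0.01, momentum=0.9)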
......@@ -12,9 +12,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
-"""Decoders package definition."""
+"""Configs for differential privacy."""

-from official.vision.beta.modeling.decoders.aspp import ASPP
-from official.vision.beta.modeling.decoders.fpn import FPN
-from official.vision.beta.modeling.decoders.nasfpn import NASFPN
+from official.modeling.hyperparams import base_config
+
+
+class DifferentialPrivacyConfig(base_config.Config):
+  # Applied to the gradients.
+  # Setting to a large number so nothing is clipped.
+  clipping_norm: float = 100000000.0  # 10^8
+  noise_multiplier: float = 0.0
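Because `DifferentialPrivacyConfig` extends `base_config.Config`, its fields can be overridden from a plain dict, and the defaults deliberately amount to a no-op (an effectively unbounded clipping norm and zero noise). A usage sketch mirroring the tests below:

from official.modeling.privacy import configs as dp_configs

dp = dp_configs.DifferentialPrivacyConfig(
    {'clipping_norm': 1.0, 'noise_multiplier': 1.1})
assert dp.clipping_norm == 1.0 and dp.noise_multiplier == 1.1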
......@@ -12,30 +12,29 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-"""Tests for roi_aligner.py."""
+"""Tests for configs."""

 # Import libraries
 import tensorflow as tf

-from official.vision.beta.modeling.layers import roi_aligner
+from official.modeling.privacy import configs


-class MultilevelROIAlignerTest(tf.test.TestCase):
+class ConfigsTest(tf.test.TestCase):

-  def test_serialize_deserialize(self):
-    kwargs = dict(
-        crop_size=7,
-        sample_offset=0.5,
-    )
-    aligner = roi_aligner.MultilevelROIAligner(**kwargs)
+  def test_clipping_norm_default(self):
+    clipping_norm = configs.DifferentialPrivacyConfig().clipping_norm
+    self.assertEqual(100000000.0, clipping_norm)

-    expected_config = dict(kwargs)
-    self.assertEqual(aligner.get_config(), expected_config)
-    new_aligner = roi_aligner.MultilevelROIAligner.from_config(
-        aligner.get_config())
-    self.assertAllEqual(aligner.get_config(), new_aligner.get_config())
+  def test_noise_multiplier_default(self):
+    noise_multiplier = configs.DifferentialPrivacyConfig().noise_multiplier
+    self.assertEqual(0.0, noise_multiplier)
+
+  def test_config(self):
+    dp_config = configs.DifferentialPrivacyConfig({
+        'clipping_norm': 1.0,
+        'noise_multiplier': 1.0
+    })
+    self.assertEqual(1.0, dp_config.clipping_norm)
+    self.assertEqual(1.0, dp_config.noise_multiplier)

 if __name__ == '__main__':
......
......@@ -12,27 +12,31 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-"""TFDS Classification decoders."""
+"""Ops for differential privacy (gradient) transforms."""

+from typing import List, Tuple
+
 import tensorflow as tf

-from official.vision.beta.dataloaders import decoder

-class ClassificationDecorder(decoder.Decoder):
-  """A tf.Example decoder for tfds classification datasets."""

-  def decode(self, serialized_example):
-    sample_dict = {
-        'image/encoded':
-            tf.io.encode_jpeg(serialized_example['image'], quality=100),
-        'image/class/label':
-            serialized_example['label'],
-    }
-    return sample_dict
+def clip_l2_norm(grads_vars: List[Tuple[tf.Tensor, tf.Tensor]],
+                 l2_norm_clip: float) -> List[Tuple[tf.Tensor, tf.Tensor]]:
+  """Clip gradients by global norm."""
+  gradients = []
+  variables = []
+  for (g, v) in grads_vars:
+    gradients.append(g)
+    variables.append(v)
+  clipped_gradients = tf.clip_by_global_norm(gradients, l2_norm_clip)[0]
+  return list(zip(clipped_gradients, variables))

-TFDS_ID_TO_DECODER_MAP = {
-    'cifar10': ClassificationDecorder,
-    'cifar100': ClassificationDecorder,
-    'imagenet2012': ClassificationDecorder,
-}
+
+def add_noise(grads_vars: List[Tuple[tf.Tensor, tf.Tensor]],
+              noise_stddev: float) -> List[Tuple[tf.Tensor, tf.Tensor]]:
+  """Add noise to gradients."""
+  ret = []
+  for (g, v) in grads_vars:
+    noise = tf.random.normal(tf.shape(g), stddev=noise_stddev)
+    ret.append((g + noise, v))
+  return ret
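A worked example matching the unit tests below: with gradients [4, 3] and [[12]], the global L2 norm is sqrt(4^2 + 3^2 + 12^2) = 13, so clipping to 1.0 scales every gradient by 1/13, and `add_noise` then adds independent Gaussian samples to each gradient:

import tensorflow as tf
from official.modeling.privacy import ops

x = tf.Variable([4.0, 3.0])
y = tf.Variable([[12.0]])
grads_vars = [(tf.constant([4.0, 3.0]), x), (tf.constant([[12.0]]), y)]
clipped = ops.clip_l2_norm(grads_vars, 1.0)  # every gradient scaled by 1/13
noised = ops.add_noise(clipped, 0.1)         # N(0, 0.1^2) noise per element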
......@@ -12,43 +12,40 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-"""Tests for mask_ops.py."""
+"""Tests for ops."""

+from unittest import mock
+
 # Import libraries
 import numpy as np
 import tensorflow as tf

-from official.vision.beta.ops import mask_ops
+from official.modeling.privacy import ops


-class MaskUtilsTest(tf.test.TestCase):
+class OpsTest(tf.test.TestCase):

-  def testPasteInstanceMasks(self):
-    image_height = 10
-    image_width = 10
-    mask_height = 6
-    mask_width = 6
-    masks = np.random.randint(0, 255, (1, mask_height, mask_width))
-    detected_boxes = np.array([[0.0, 2.0, mask_width, mask_height]])
-    _ = mask_ops.paste_instance_masks(
-        masks, detected_boxes, image_height, image_width)
+  def test_clip_l2_norm(self):
+    x = tf.constant([4.0, 3.0])
+    y = tf.constant([[12.0]])
+    tensors = [(x, x), (y, y)]
+    clipped = ops.clip_l2_norm(tensors, 1.0)
+    for a, b in zip(clipped, tensors):
+      self.assertAllClose(a[0], b[0] / 13.0)  # sqrt(4^2 + 3^2 + 12^2) = 13
+      self.assertAllClose(a[1], b[1])

-  def testPasteInstanceMasksV2(self):
-    image_height = 10
-    image_width = 10
-    mask_height = 6
-    mask_width = 6
-    masks = np.random.randint(0, 255, (1, mask_height, mask_width))
-    detected_boxes = np.array([[0.0, 2.0, mask_width, mask_height]])
-    image_masks = mask_ops.paste_instance_masks_v2(
-        masks, detected_boxes, image_height, image_width)
-    self.assertNDArrayNear(
-        image_masks[:, 2:8, 0:6],
-        np.array(masks > 0.5, dtype=np.uint8),
-        1e-5)
+  @mock.patch.object(tf.random,
+                     'normal',
+                     autospec=True)
+  def test_add_noise(self, mock_random):
+    x = tf.constant([0.0, 0.0])
+    y = tf.constant([[0.0]])
+    tensors = [(x, x), (y, y)]
+    mock_random.side_effect = [tf.constant([1.0, 1.0]), tf.constant([[1.0]])]
+    added = ops.add_noise(tensors, 10.0)
+    for a, b in zip(added, tensors):
+      self.assertAllClose(a[0], b[0] + 1.0)
+      self.assertAllClose(a[1], b[1])
+    _, kwargs = mock_random.call_args
+    self.assertEqual(kwargs['stddev'], 10.0)

 if __name__ == '__main__':
......
......@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
 # pylint: disable=g-doc-return-or-yield,line-too-long
 """WMT translation configurations."""
......
......@@ -124,7 +124,7 @@ class DualEncoderDataLoader(data_loader.DataLoader):
       raise ValueError('Expected {} to start with {}'.format(string, old))

     def _switch_key_prefix(d, old, new):
-      return {_switch_prefix(key, old, new): value for key, value in d.items()}
+      return {_switch_prefix(key, old, new): value for key, value in d.items()}  # pytype: disable=attribute-error  # trace-all-classes

     model_inputs = _switch_key_prefix(
         self._bert_tokenize(record, self._left_text_fields),
......
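The only change here is a pytype suppression on the dict comprehension. For context, a standalone sketch of what these nested prefix helpers do (the happy-path body of `_switch_prefix` is inferred from its name and error message, and the sample keys are purely illustrative):

def _switch_prefix(string, old, new):
  # Inferred behavior: swap a required key prefix, else raise.
  if string.startswith(old):
    return new + string[len(old):]
  raise ValueError('Expected {} to start with {}'.format(string, old))

def _switch_key_prefix(d, old, new):
  return {_switch_prefix(key, old, new): value for key, value in d.items()}

_switch_key_prefix({'left_word_ids': [1]}, 'left_', 'input_')
# -> {'input_word_ids': [1]}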
......@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
 """Definitions for random feature Gaussian process layer."""

 import math
 import tensorflow as tf
......
......@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
 """Tests for Gaussian process functions."""

 import os
 import shutil
......
......@@ -1004,6 +1004,7 @@ class T5TransformerParams:
   num_heads: int
   d_ff: int
   vocab_size: int
+  target_vocab_size: Optional[int] = None
   dropout_rate: float = 0.0
   layer_norm_epsilon: float = 1e-6
   shared_embedding: bool = False
......@@ -1159,11 +1160,15 @@ class Decoder(Module):
     self.compute_dtype = compute_dtype
     if self.config.num_decoder_layers is None:
       self.config.num_decoder_layers = self.config.num_layers
+    if not hasattr(
+        self.config,
+        "target_vocab_size") or self.config.target_vocab_size is None:
+      self.config.target_vocab_size = self.config.vocab_size
     with self.name_scope:
       # Target Embedding.
       if shared_embedding is None:
         self.target_embed = Embed(
-            vocab_size=self.config.vocab_size,
+            vocab_size=self.config.target_vocab_size,
             features=self.config.d_model,
             embeddings_initializer=self.config.vocab_embeddings_initializer,
             dtype=self.dtype,
......@@ -1211,7 +1216,7 @@ class Decoder(Module):
     if not self.config.logits_via_embedding:
       self.logits_dense = Linear(
           in_features=self.config.d_model,
-          out_features=self.config.vocab_size,
+          out_features=self.config.target_vocab_size,
           use_bias=False,
           dtype=self.dtype,
           name="logits")
......
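Together these hunks let the decoder use a separate output vocabulary: `target_vocab_size` now sizes both the target embedding and the final logits layer, falling back to `vocab_size` when unset. A construction sketch, assuming `T5TransformerParams` also declares `num_layers` and `d_model` above the visible hunk (all values illustrative):

params = T5TransformerParams(
    num_layers=2,
    d_model=64,
    num_heads=4,
    d_ff=256,
    vocab_size=32000,          # encoder/input vocabulary
    target_vocab_size=24000,   # decoder output vocabulary (new field)
)
# With target_vocab_size=None, the decoder falls back to vocab_size.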
......@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
 """Definitions for AssembleNet/++ structures.

 This structure is a `list` corresponding to a graph representation of the
......
......@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3

 from absl.testing import parameterized
 import tensorflow as tf

 from official.core import config_definitions as cfg
......
......@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
 """Contains definitions for the AssembleNet [1] models.

 Requires the AssembleNet architecture to be specified in
......
......@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
 """Contains definitions for 'Representation Flow' layer [1].

 Representation flow layer is a generalization of optical flow extraction; the
......
......@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3
 r"""Training driver.

 Commandline:
......