"examples/vscode:/vscode.git/clone" did not exist on "030646c004e652853628706de350b159f0912da9"
Commit f167dff9 authored by zhanggzh

Deleted models-2.13.1/.github/ISSUE_TEMPLATE/00-official-bug-report-issue.md, models-2.13.1/.github/ISSUE_TEMPLATE/10-official-documentation-issue.md, models-2.13.1/.github/ISSUE_TEMPLATE/20-official-feature-request-issue.md, models-2.13.1/.github/ISSUE_TEMPLATE/30-research-bug-report-issue.md, models-2.13.1/.github/ISSUE_TEMPLATE/40-research-documentation-issue.md, models-2.13.1/.github/ISSUE_TEMPLATE/50-research-feature-request-issue.md, models-2.13.1/.github/ISSUE_TEMPLATE/60-questions-help-issue.md, models-2.13.1/.github/ISSUE_TEMPLATE/config.yml, models-2.13.1/.github/PULL_REQUEST_TEMPLATE.md, models-2.13.1/.github/README_TEMPLATE.md, models-2.13.1/docs/nlp/_guide_toc.yaml, models-2.13.1/docs/nlp/customize_encoder.ipynb, models-2.13.1/docs/nlp/decoding_api.ipynb, models-2.13.1/docs/nlp/fine_tune_bert.ipynb, models-2.13.1/docs/nlp/index.ipynb, models-2.13.1/docs/nlp/load_lm_ckpts.ipynb, models-2.13.1/docs/orbit/index.ipynb, models-2.13.1/docs/vision/_toc.yaml, models-2.13.1/docs/vision/image_classification.ipynb, models-2.13.1/docs/vision/instance_segmentation.ipynb, models-2.13.1/docs/vision/object_detection.ipynb, models-2.13.1/docs/vision/semantic_segmentation.ipynb, models-2.13.1/docs/README.md, models-2.13.1/docs/index.md, models-2.13.1/official/benchmark/datastore/schema/benchmark_metric.json, models-2.13.1/official/benchmark/datastore/schema/benchmark_run.json, models-2.13.1/official/benchmark/datastore/schema/benchmark_run_status.json, models-2.13.1/official/benchmark/models/shakespeare/README.md, models-2.13.1/official/benchmark/models/shakespeare/__init__.py, models-2.13.1/official/benchmark/models/shakespeare/shakespeare_main.py, models-2.13.1/official/benchmark/models/__init__.py, models-2.13.1/official/benchmark/models/cifar_preprocessing.py, models-2.13.1/official/benchmark/models/resnet_cifar_main.py, models-2.13.1/official/benchmark/models/resnet_cifar_model.py, models-2.13.1/official/benchmark/models/resnet_cifar_test.py, models-2.13.1/official/benchmark/models/resnet_imagenet_main.py, models-2.13.1/official/benchmark/models/resnet_imagenet_test.py, models-2.13.1/official/benchmark/models/resnet_imagenet_test_tpu.py, models-2.13.1/official/benchmark/models/synthetic_util.py, models-2.13.1/official/benchmark/__init__.py, models-2.13.1/official/benchmark/base_benchmark.py, models-2.13.1/official/benchmark/benchmark_definitions.py, models-2.13.1/official/benchmark/benchmark_lib.py, models-2.13.1/official/benchmark/benchmark_lib_test.py, models-2.13.1/official/benchmark/benchmark_wrappers.py, models-2.13.1/official/benchmark/bert_benchmark.py, models-2.13.1/official/benchmark/bert_benchmark_utils.py, models-2.13.1/official/benchmark/bert_pretrain_benchmark.py, models-2.13.1/official/benchmark/bert_squad_benchmark.py, models-2.13.1/official/benchmark/config_utils.py, models-2.13.1/official/benchmark/keras_benchmark.py, models-2.13.1/official/benchmark/keras_cifar_benchmark.py, models-2.13.1/official/benchmark/keras_imagenet_benchmark.py, models-2.13.1/official/benchmark/ncf_keras_benchmark.py, models-2.13.1/official/benchmark/nhnet_benchmark.py, models-2.13.1/official/benchmark/owner_utils.py, models-2.13.1/official/benchmark/owner_utils_test.py, models-2.13.1/official/benchmark/perfzero_benchmark.py, models-2.13.1/official/benchmark/resnet50_keras_core.py, models-2.13.1/official/benchmark/resnet_ctl_imagenet_benchmark.py, models-2.13.1/official/benchmark/retinanet_benchmark.py, models-2.13.1/official/benchmark/shakespeare_benchmark.py, 
models-2.13.1/official/benchmark/tf_scan_benchmark.py, models-2.13.1/official/benchmark/tf_vision_saved_model_benchmark.py, models-2.13.1/official/benchmark/tfhub_memory_usage_benchmark.py, models-2.13.1/official/benchmark/tflite_utils.py, models-2.13.1/official/benchmark/transformer_benchmark.py, models-2.13.1/official/common/__init__.py, models-2.13.1/official/common/dataset_fn.py, models-2.13.1/official/common/distribute_utils.py, models-2.13.1/official/common/distribute_utils_test.py, models-2.13.1/official/common/flags.py, models-2.13.1/official/common/registry_imports.py, models-2.13.1/official/common/streamz_counters.py, models-2.13.1/official/core/__init__.py, models-2.13.1/official/core/actions.py, models-2.13.1/official/core/actions_test.py, models-2.13.1/official/core/base_task.py, models-2.13.1/official/core/base_trainer.py, models-2.13.1/official/core/base_trainer_test.py, models-2.13.1/official/core/config_definitions.py, models-2.13.1/official/core/exp_factory.py, models-2.13.1/official/core/export_base.py, models-2.13.1/official/core/export_base_test.py, models-2.13.1/official/core/file_writers.py, models-2.13.1/official/core/file_writers_test.py, models-2.13.1/official/core/input_reader.py, models-2.13.1/official/core/registry.py, models-2.13.1/official/core/registry_test.py, models-2.13.1/official/core/savedmodel_checkpoint_manager.py, models-2.13.1/official/core/savedmodel_checkpoint_manager_test.py, models-2.13.1/official/core/task_factory.py, models-2.13.1/official/core/test_utils.py, models-2.13.1/official/core/tf_example_builder.py, models-2.13.1/official/core/tf_example_builder_test.py, models-2.13.1/official/core/tf_example_feature_key.py, models-2.13.1/official/core/tf_example_feature_key_test.py, models-2.13.1/official/core/train_lib.py, models-2.13.1/official/core/train_lib_test.py, models-2.13.1/official/core/train_utils.py, models-2.13.1/official/core/train_utils_test.py, models-2.13.1/official/legacy/albert/README.md, models-2.13.1/official/legacy/albert/__init__.py, models-2.13.1/official/legacy/albert/configs.py, models-2.13.1/official/legacy/bert/README.md, models-2.13.1/official/legacy/bert/__init__.py, models-2.13.1/official/legacy/bert/bert_cloud_tpu.md, models-2.13.1/official/legacy/bert/bert_models.py, models-2.13.1/official/legacy/bert/bert_models_test.py, models-2.13.1/official/legacy/bert/common_flags.py, models-2.13.1/official/legacy/bert/configs.py, models-2.13.1/official/legacy/bert/export_tfhub.py, models-2.13.1/official/legacy/bert/export_tfhub_test.py, models-2.13.1/official/legacy/bert/input_pipeline.py, models-2.13.1/official/legacy/bert/model_saving_utils.py, models-2.13.1/official/legacy/bert/model_training_utils.py, models-2.13.1/official/legacy/bert/model_training_utils_test.py, models-2.13.1/official/legacy/bert/run_classifier.py, models-2.13.1/official/legacy/bert/run_pretraining.py, models-2.13.1/official/legacy/bert/run_squad.py, models-2.13.1/official/legacy/bert/run_squad_helper.py, models-2.13.1/official/legacy/bert/serving.py, models-2.13.1/official/legacy/detection/configs/__init__.py, models-2.13.1/official/legacy/detection/configs/base_config.py, models-2.13.1/official/legacy/detection/configs/factory.py, models-2.13.1/official/legacy/detection/configs/maskrcnn_config.py, models-2.13.1/official/legacy/detection/configs/olnmask_config.py, models-2.13.1/official/legacy/detection/configs/retinanet_config.py, models-2.13.1/official/legacy/detection/configs/shapemask_config.py, 
models-2.13.1/official/legacy/detection/dataloader/__init__.py, models-2.13.1/official/legacy/detection/dataloader/anchor.py, models-2.13.1/official/legacy/detection/dataloader/factory.py, models-2.13.1/official/legacy/detection/dataloader/input_reader.py, models-2.13.1/official/legacy/detection/dataloader/maskrcnn_parser.py, models-2.13.1/official/legacy/detection/dataloader/mode_keys.py, models-2.13.1/official/legacy/detection/dataloader/olnmask_parser.py, models-2.13.1/official/legacy/detection/dataloader/retinanet_parser.py, models-2.13.1/official/legacy/detection/dataloader/shapemask_parser.py, models-2.13.1/official/legacy/detection/dataloader/tf_example_decoder.py, models-2.13.1/official/legacy/detection/evaluation/__init__.py, models-2.13.1/official/legacy/detection/evaluation/coco_evaluator.py, models-2.13.1/official/legacy/detection/evaluation/coco_utils.py, models-2.13.1/official/legacy/detection/evaluation/factory.py, models-2.13.1/official/legacy/detection/executor/__init__.py, models-2.13.1/official/legacy/detection/executor/detection_executor.py, models-2.13.1/official/legacy/detection/executor/distributed_executor.py, models-2.13.1/official/legacy/detection/modeling/architecture/__init__.py, models-2.13.1/official/legacy/detection/modeling/architecture/factory.py, models-2.13.1/official/legacy/detection/modeling/architecture/fpn.py, models-2.13.1/official/legacy/detection/modeling/architecture/heads.py, models-2.13.1/official/legacy/detection/modeling/architecture/identity.py, models-2.13.1/official/legacy/detection/modeling/architecture/nn_blocks.py, models-2.13.1/official/legacy/detection/modeling/architecture/nn_ops.py, models-2.13.1/official/legacy/detection/modeling/architecture/resnet.py, models-2.13.1/official/legacy/detection/modeling/architecture/spinenet.py, models-2.13.1/official/legacy/detection/modeling/__init__.py, models-2.13.1/official/legacy/detection/modeling/base_model.py, models-2.13.1/official/legacy/detection/modeling/checkpoint_utils.py, models-2.13.1/official/legacy/detection/modeling/factory.py, models-2.13.1/official/legacy/detection/modeling/learning_rates.py, models-2.13.1/official/legacy/detection/modeling/losses.py, models-2.13.1/official/legacy/detection/modeling/maskrcnn_model.py, models-2.13.1/official/legacy/detection/modeling/olnmask_model.py, models-2.13.1/official/legacy/detection/modeling/optimizers.py, models-2.13.1/official/legacy/detection/modeling/retinanet_model.py, models-2.13.1/official/legacy/detection/modeling/shapemask_model.py, models-2.13.1/official/legacy/detection/ops/__init__.py, models-2.13.1/official/legacy/detection/ops/nms.py, models-2.13.1/official/legacy/detection/ops/postprocess_ops.py, models-2.13.1/official/legacy/detection/ops/roi_ops.py, models-2.13.1/official/legacy/detection/ops/spatial_transform_ops.py, models-2.13.1/official/legacy/detection/ops/target_ops.py, models-2.13.1/official/legacy/detection/utils/__init__.py, models-2.13.1/official/legacy/detection/utils/box_utils.py, models-2.13.1/official/legacy/detection/utils/class_utils.py, models-2.13.1/official/legacy/detection/utils/dataloader_utils.py, models-2.13.1/official/legacy/detection/utils/input_utils.py, models-2.13.1/official/legacy/detection/utils/mask_utils.py, models-2.13.1/official/legacy/detection/README.md, models-2.13.1/official/legacy/detection/__init__.py, models-2.13.1/official/legacy/detection/main.py, models-2.13.1/official/legacy/image_classification/configs/examples/efficientnet/imagenet/efficientnet-b0-gpu.yaml, 
models-2.13.1/official/legacy/image_classification/configs/examples/efficientnet/imagenet/efficientnet-b0-tpu.yaml, models-2.13.1/official/legacy/image_classification/configs/examples/efficientnet/imagenet/efficientnet-b1-gpu.yaml, models-2.13.1/official/legacy/image_classification/configs/examples/efficientnet/imagenet/efficientnet-b1-tpu.yaml, models-2.13.1/official/legacy/image_classification/configs/examples/resnet/imagenet/gpu.yaml, models-2.13.1/official/legacy/image_classification/configs/examples/resnet/imagenet/tpu.yaml, models-2.13.1/official/legacy/image_classification/configs/examples/vgg16/imagenet/gpu.yaml, models-2.13.1/official/legacy/image_classification/configs/__init__.py, models-2.13.1/official/legacy/image_classification/configs/base_configs.py, models-2.13.1/official/legacy/image_classification/configs/configs.py, models-2.13.1/official/legacy/image_classification/efficientnet/__init__.py, models-2.13.1/official/legacy/image_classification/efficientnet/common_modules.py, models-2.13.1/official/legacy/image_classification/efficientnet/efficientnet_config.py, models-2.13.1/official/legacy/image_classification/efficientnet/efficientnet_model.py, models-2.13.1/official/legacy/image_classification/efficientnet/tfhub_export.py, models-2.13.1/official/legacy/image_classification/resnet/README.md, models-2.13.1/official/legacy/image_classification/resnet/__init__.py, models-2.13.1/official/legacy/image_classification/resnet/common.py, models-2.13.1/official/legacy/image_classification/resnet/imagenet_preprocessing.py, models-2.13.1/official/legacy/image_classification/resnet/resnet_config.py, models-2.13.1/official/legacy/image_classification/resnet/resnet_ctl_imagenet_main.py, models-2.13.1/official/legacy/image_classification/resnet/resnet_model.py, models-2.13.1/official/legacy/image_classification/resnet/resnet_runnable.py, models-2.13.1/official/legacy/image_classification/resnet/tfhub_export.py, models-2.13.1/official/legacy/image_classification/vgg/__init__.py, models-2.13.1/official/legacy/image_classification/vgg/vgg_config.py, models-2.13.1/official/legacy/image_classification/vgg/vgg_model.py, models-2.13.1/official/legacy/image_classification/README.md, models-2.13.1/official/legacy/image_classification/__init__.py, models-2.13.1/official/legacy/image_classification/augment.py, models-2.13.1/official/legacy/image_classification/augment_test.py, models-2.13.1/official/legacy/image_classification/callbacks.py, models-2.13.1/official/legacy/image_classification/classifier_trainer.py, models-2.13.1/official/legacy/image_classification/classifier_trainer_test.py, models-2.13.1/official/legacy/image_classification/classifier_trainer_util_test.py, models-2.13.1/official/legacy/image_classification/dataset_factory.py, models-2.13.1/official/legacy/image_classification/learning_rate.py, models-2.13.1/official/legacy/image_classification/learning_rate_test.py, models-2.13.1/official/legacy/image_classification/mnist_main.py, models-2.13.1/official/legacy/image_classification/mnist_test.py, models-2.13.1/official/legacy/image_classification/optimizer_factory.py, models-2.13.1/official/legacy/image_classification/optimizer_factory_test.py, models-2.13.1/official/legacy/image_classification/preprocessing.py, models-2.13.1/official/legacy/image_classification/test_utils.py, models-2.13.1/official/legacy/transformer/utils/__init__.py, models-2.13.1/official/legacy/transformer/utils/metrics.py, models-2.13.1/official/legacy/transformer/utils/tokenizer.py, 
models-2.13.1/official/legacy/transformer/utils/tokenizer_test.py, models-2.13.1/official/legacy/transformer/README.md, models-2.13.1/official/legacy/transformer/__init__.py, models-2.13.1/official/legacy/transformer/attention_layer.py, models-2.13.1/official/legacy/transformer/beam_search_v1.py, models-2.13.1/official/legacy/transformer/compute_bleu.py, models-2.13.1/official/legacy/transformer/compute_bleu_test.py, models-2.13.1/official/legacy/transformer/data_download.py, models-2.13.1/official/legacy/transformer/data_pipeline.py, models-2.13.1/official/legacy/transformer/embedding_layer.py, models-2.13.1/official/legacy/transformer/ffn_layer.py, models-2.13.1/official/legacy/transformer/metrics.py, models-2.13.1/official/legacy/transformer/misc.py, models-2.13.1/official/legacy/transformer/model_params.py, models-2.13.1/official/legacy/transformer/model_utils.py, models-2.13.1/official/legacy/transformer/model_utils_test.py, models-2.13.1/official/legacy/transformer/optimizer.py, models-2.13.1/official/legacy/transformer/transformer.py, models-2.13.1/official/legacy/transformer/transformer_forward_test.py, models-2.13.1/official/legacy/transformer/transformer_layers_test.py, models-2.13.1/official/legacy/transformer/transformer_main.py, models-2.13.1/official/legacy/transformer/transformer_main_test.py, models-2.13.1/official/legacy/transformer/transformer_test.py, models-2.13.1/official/legacy/transformer/translate.py, models-2.13.1/official/legacy/xlnet/README.md, models-2.13.1/official/legacy/xlnet/__init__.py, models-2.13.1/official/legacy/xlnet/classifier_utils.py, models-2.13.1/official/legacy/xlnet/common_flags.py, models-2.13.1/official/legacy/xlnet/data_utils.py, models-2.13.1/official/legacy/xlnet/optimization.py, models-2.13.1/official/legacy/xlnet/preprocess_classification_data.py, models-2.13.1/official/legacy/xlnet/preprocess_pretrain_data.py, models-2.13.1/official/legacy/xlnet/preprocess_squad_data.py, models-2.13.1/official/legacy/xlnet/preprocess_utils.py, models-2.13.1/official/legacy/xlnet/run_classifier.py, models-2.13.1/official/legacy/xlnet/run_pretrain.py, models-2.13.1/official/legacy/xlnet/run_squad.py, models-2.13.1/official/legacy/xlnet/squad_utils.py, models-2.13.1/official/legacy/xlnet/training_utils.py, models-2.13.1/official/legacy/xlnet/xlnet_config.py, models-2.13.1/official/legacy/xlnet/xlnet_modeling.py, models-2.13.1/official/legacy/README.md, models-2.13.1/official/legacy/__init__.py, models-2.13.1/official/modeling/activations/__init__.py, models-2.13.1/official/modeling/activations/gelu.py, models-2.13.1/official/modeling/activations/gelu_test.py, models-2.13.1/official/modeling/activations/mish.py, models-2.13.1/official/modeling/activations/mish_test.py, models-2.13.1/official/modeling/activations/relu.py, models-2.13.1/official/modeling/activations/relu_test.py, models-2.13.1/official/modeling/activations/sigmoid.py, models-2.13.1/official/modeling/activations/sigmoid_test.py, models-2.13.1/official/modeling/activations/swish.py, models-2.13.1/official/modeling/activations/swish_test.py, models-2.13.1/official/modeling/fast_training/experimental/tf2_utils_2x_wide.py, models-2.13.1/official/modeling/fast_training/experimental/tf2_utils_2x_wide_test.py, models-2.13.1/official/modeling/fast_training/progressive/policies.py, models-2.13.1/official/modeling/fast_training/progressive/train.py, models-2.13.1/official/modeling/fast_training/progressive/train_lib.py, models-2.13.1/official/modeling/fast_training/progressive/train_lib_test.py, 
models-2.13.1/official/modeling/fast_training/progressive/trainer.py, models-2.13.1/official/modeling/fast_training/progressive/trainer_test.py, models-2.13.1/official/modeling/fast_training/progressive/utils.py, models-2.13.1/official/modeling/hyperparams/__init__.py, models-2.13.1/official/modeling/hyperparams/base_config.py, models-2.13.1/official/modeling/hyperparams/base_config_test.py, models-2.13.1/official/modeling/hyperparams/oneof.py, models-2.13.1/official/modeling/hyperparams/oneof_test.py, models-2.13.1/official/modeling/hyperparams/params_dict.py, models-2.13.1/official/modeling/hyperparams/params_dict_test.py, models-2.13.1/official/modeling/multitask/__init__.py, models-2.13.1/official/modeling/multitask/base_model.py, models-2.13.1/official/modeling/multitask/base_trainer.py, models-2.13.1/official/modeling/multitask/base_trainer_test.py, models-2.13.1/official/modeling/multitask/configs.py, models-2.13.1/official/modeling/multitask/evaluator.py, models-2.13.1/official/modeling/multitask/evaluator_test.py, models-2.13.1/official/modeling/multitask/interleaving_trainer.py, models-2.13.1/official/modeling/multitask/interleaving_trainer_test.py, models-2.13.1/official/modeling/multitask/multitask.py, models-2.13.1/official/modeling/multitask/task_sampler.py, models-2.13.1/official/modeling/multitask/task_sampler_test.py, models-2.13.1/official/modeling/multitask/test_utils.py, models-2.13.1/official/modeling/multitask/train_lib.py, models-2.13.1/official/modeling/multitask/train_lib_test.py, models-2.13.1/official/modeling/optimization/configs/__init__.py, models-2.13.1/official/modeling/optimization/configs/learning_rate_config.py, models-2.13.1/official/modeling/optimization/configs/optimization_config.py, models-2.13.1/official/modeling/optimization/configs/optimization_config_test.py, models-2.13.1/official/modeling/optimization/configs/optimizer_config.py, models-2.13.1/official/modeling/optimization/__init__.py, models-2.13.1/official/modeling/optimization/adafactor_optimizer.py, models-2.13.1/official/modeling/optimization/ema_optimizer.py, models-2.13.1/official/modeling/optimization/lamb.py, models-2.13.1/official/modeling/optimization/lamb_test.py, models-2.13.1/official/modeling/optimization/lars.py, models-2.13.1/official/modeling/optimization/legacy_adamw.py, models-2.13.1/official/modeling/optimization/lr_schedule.py, models-2.13.1/official/modeling/optimization/lr_schedule_test.py, models-2.13.1/official/modeling/optimization/optimizer_factory.py, models-2.13.1/official/modeling/optimization/optimizer_factory_test.py, models-2.13.1/official/modeling/optimization/slide_optimizer.py, models-2.13.1/official/modeling/privacy/__init__.py, models-2.13.1/official/modeling/privacy/configs.py, models-2.13.1/official/modeling/privacy/configs_test.py, models-2.13.1/official/modeling/privacy/ops.py, models-2.13.1/official/modeling/privacy/ops_test.py, models-2.13.1/official/modeling/__init__.py, models-2.13.1/official/modeling/grad_utils.py, models-2.13.1/official/modeling/grad_utils_test.py, models-2.13.1/official/modeling/performance.py, models-2.13.1/official/modeling/tf_utils.py, models-2.13.1/official/modeling/tf_utils_test.py, models-2.13.1/official/nlp/configs/experiments/glue_mnli_matched.yaml, models-2.13.1/official/nlp/configs/experiments/glue_mnli_text.yaml, models-2.13.1/official/nlp/configs/experiments/squad_v1.yaml, models-2.13.1/official/nlp/configs/experiments/wiki_books_pretrain.yaml, models-2.13.1/official/nlp/configs/experiments/wiki_tfds_pretrain.yaml, 
models-2.13.1/official/nlp/configs/models/albert_base.yaml, models-2.13.1/official/nlp/configs/models/bert_en_uncased_base.yaml, models-2.13.1/official/nlp/configs/__init__.py, models-2.13.1/official/nlp/configs/bert.py, models-2.13.1/official/nlp/configs/electra.py, models-2.13.1/official/nlp/configs/encoders.py, models-2.13.1/official/nlp/configs/encoders_test.py, models-2.13.1/official/nlp/configs/experiment_configs.py, models-2.13.1/official/nlp/configs/finetuning_experiments.py, models-2.13.1/official/nlp/configs/pretraining_experiments.py, models-2.13.1/official/nlp/configs/wmt_transformer_experiments.py, models-2.13.1/official/nlp/data/README.md, models-2.13.1/official/nlp/data/__init__.py, models-2.13.1/official/nlp/data/classifier_data_lib.py, models-2.13.1/official/nlp/data/classifier_data_lib_test.py, models-2.13.1/official/nlp/data/create_finetuning_data.py, models-2.13.1/official/nlp/data/create_pretraining_data.py, models-2.13.1/official/nlp/data/create_pretraining_data_test.py, models-2.13.1/official/nlp/data/create_xlnet_pretraining_data.py, models-2.13.1/official/nlp/data/create_xlnet_pretraining_data_test.py, models-2.13.1/official/nlp/data/data_loader.py, models-2.13.1/official/nlp/data/data_loader_factory.py, models-2.13.1/official/nlp/data/data_loader_factory_test.py, models-2.13.1/official/nlp/data/dual_encoder_dataloader.py, models-2.13.1/official/nlp/data/dual_encoder_dataloader_test.py, models-2.13.1/official/nlp/data/pretrain_dataloader.py, models-2.13.1/official/nlp/data/pretrain_dataloader_test.py, models-2.13.1/official/nlp/data/pretrain_dynamic_dataloader.py, models-2.13.1/official/nlp/data/pretrain_dynamic_dataloader_test.py, models-2.13.1/official/nlp/data/pretrain_text_dataloader.py, models-2.13.1/official/nlp/data/question_answering_dataloader.py, models-2.13.1/official/nlp/data/question_answering_dataloader_test.py, models-2.13.1/official/nlp/data/sentence_prediction_dataloader.py, models-2.13.1/official/nlp/data/sentence_prediction_dataloader_test.py, models-2.13.1/official/nlp/data/sentence_retrieval_lib.py, models-2.13.1/official/nlp/data/squad_lib.py, models-2.13.1/official/nlp/data/squad_lib_sp.py, models-2.13.1/official/nlp/data/tagging_data_lib.py, models-2.13.1/official/nlp/data/tagging_data_lib_test.py, models-2.13.1/official/nlp/data/tagging_dataloader.py, models-2.13.1/official/nlp/data/tagging_dataloader_test.py, models-2.13.1/official/nlp/data/train_sentencepiece.py, models-2.13.1/official/nlp/data/wmt_dataloader.py, models-2.13.1/official/nlp/data/wmt_dataloader_test.py, models-2.13.1/official/nlp/docs/README.md, models-2.13.1/official/nlp/docs/pretrain.md, models-2.13.1/official/nlp/docs/pretrained_models.md, models-2.13.1/official/nlp/docs/tfhub.md, models-2.13.1/official/nlp/docs/train.md, models-2.13.1/official/nlp/finetuning/glue/flags.py, models-2.13.1/official/nlp/finetuning/glue/run_glue.py, models-2.13.1/official/nlp/finetuning/superglue/flags.py, models-2.13.1/official/nlp/finetuning/superglue/run_superglue.py, models-2.13.1/official/nlp/finetuning/binary_helper.py, models-2.13.1/official/nlp/metrics/__init__.py, models-2.13.1/official/nlp/metrics/bleu.py, models-2.13.1/official/nlp/metrics/bleu_test.py, models-2.13.1/official/nlp/modeling/layers/README.md, models-2.13.1/official/nlp/modeling/layers/__init__.py, models-2.13.1/official/nlp/modeling/layers/attention.py, models-2.13.1/official/nlp/modeling/layers/attention_test.py, models-2.13.1/official/nlp/modeling/layers/bigbird_attention.py, 
models-2.13.1/official/nlp/modeling/layers/bigbird_attention_test.py, models-2.13.1/official/nlp/modeling/layers/block_diag_feedforward.py, models-2.13.1/official/nlp/modeling/layers/block_diag_feedforward_test.py, models-2.13.1/official/nlp/modeling/layers/cls_head.py, models-2.13.1/official/nlp/modeling/layers/cls_head_test.py, models-2.13.1/official/nlp/modeling/layers/factorized_embedding.py, models-2.13.1/official/nlp/modeling/layers/factorized_embedding_test.py, models-2.13.1/official/nlp/modeling/layers/gated_feedforward.py, models-2.13.1/official/nlp/modeling/layers/gated_feedforward_test.py, models-2.13.1/official/nlp/modeling/layers/gaussian_process.py, models-2.13.1/official/nlp/modeling/layers/gaussian_process_test.py, models-2.13.1/official/nlp/modeling/layers/kernel_attention.py, models-2.13.1/official/nlp/modeling/layers/kernel_attention_test.py, models-2.13.1/official/nlp/modeling/layers/masked_lm.py, models-2.13.1/official/nlp/modeling/layers/masked_lm_test.py, models-2.13.1/official/nlp/modeling/layers/masked_softmax.py, models-2.13.1/official/nlp/modeling/layers/masked_softmax_test.py, models-2.13.1/official/nlp/modeling/layers/mat_mul_with_margin.py, models-2.13.1/official/nlp/modeling/layers/mat_mul_with_margin_test.py, models-2.13.1/official/nlp/modeling/layers/mixing.py, models-2.13.1/official/nlp/modeling/layers/mixing_test.py, models-2.13.1/official/nlp/modeling/layers/mobile_bert_layers.py, models-2.13.1/official/nlp/modeling/layers/mobile_bert_layers_test.py, models-2.13.1/official/nlp/modeling/layers/moe.py, models-2.13.1/official/nlp/modeling/layers/moe_test.py, models-2.13.1/official/nlp/modeling/layers/multi_channel_attention.py, models-2.13.1/official/nlp/modeling/layers/multi_channel_attention_test.py, models-2.13.1/official/nlp/modeling/layers/on_device_embedding.py, models-2.13.1/official/nlp/modeling/layers/on_device_embedding_test.py, models-2.13.1/official/nlp/modeling/layers/pack_optimization.py, models-2.13.1/official/nlp/modeling/layers/pack_optimization_test.py, models-2.13.1/official/nlp/modeling/layers/per_dim_scale_attention.py, models-2.13.1/official/nlp/modeling/layers/per_dim_scale_attention_test.py, models-2.13.1/official/nlp/modeling/layers/position_embedding.py, models-2.13.1/official/nlp/modeling/layers/position_embedding_test.py, models-2.13.1/official/nlp/modeling/layers/relative_attention.py, models-2.13.1/official/nlp/modeling/layers/relative_attention_test.py, models-2.13.1/official/nlp/modeling/layers/reuse_attention.py, models-2.13.1/official/nlp/modeling/layers/reuse_attention_test.py, models-2.13.1/official/nlp/modeling/layers/reuse_transformer.py, models-2.13.1/official/nlp/modeling/layers/reuse_transformer_test.py, models-2.13.1/official/nlp/modeling/layers/rezero_transformer.py, models-2.13.1/official/nlp/modeling/layers/rezero_transformer_test.py, models-2.13.1/official/nlp/modeling/layers/routing.py, models-2.13.1/official/nlp/modeling/layers/routing_test.py, models-2.13.1/official/nlp/modeling/layers/self_attention_mask.py, models-2.13.1/official/nlp/modeling/layers/spectral_normalization.py, models-2.13.1/official/nlp/modeling/layers/spectral_normalization_test.py, models-2.13.1/official/nlp/modeling/layers/talking_heads_attention.py, models-2.13.1/official/nlp/modeling/layers/talking_heads_attention_test.py, models-2.13.1/official/nlp/modeling/layers/text_layers.py, models-2.13.1/official/nlp/modeling/layers/text_layers_test.py, models-2.13.1/official/nlp/modeling/layers/tn_expand_condense.py, 
models-2.13.1/official/nlp/modeling/layers/tn_expand_condense_test.py, models-2.13.1/official/nlp/modeling/layers/tn_transformer_expand_condense.py, models-2.13.1/official/nlp/modeling/layers/tn_transformer_test.py, models-2.13.1/official/nlp/modeling/layers/transformer.py, models-2.13.1/official/nlp/modeling/layers/transformer_encoder_block.py, models-2.13.1/official/nlp/modeling/layers/transformer_encoder_block_test.py, models-2.13.1/official/nlp/modeling/layers/transformer_scaffold.py, models-2.13.1/official/nlp/modeling/layers/transformer_scaffold_test.py, models-2.13.1/official/nlp/modeling/layers/transformer_test.py, models-2.13.1/official/nlp/modeling/layers/transformer_xl.py, models-2.13.1/official/nlp/modeling/layers/transformer_xl_test.py, models-2.13.1/official/nlp/modeling/layers/util.py, models-2.13.1/official/nlp/modeling/losses/README.md, models-2.13.1/official/nlp/modeling/losses/__init__.py, models-2.13.1/official/nlp/modeling/losses/weighted_sparse_categorical_crossentropy.py, models-2.13.1/official/nlp/modeling/losses/weighted_sparse_categorical_crossentropy_test.py, models-2.13.1/official/nlp/modeling/models/README.md, models-2.13.1/official/nlp/modeling/models/__init__.py, models-2.13.1/official/nlp/modeling/models/bert_classifier.py, models-2.13.1/official/nlp/modeling/models/bert_classifier_test.py, models-2.13.1/official/nlp/modeling/models/bert_pretrainer.py, models-2.13.1/official/nlp/modeling/models/bert_pretrainer_test.py, models-2.13.1/official/nlp/modeling/models/bert_span_labeler.py, models-2.13.1/official/nlp/modeling/models/bert_span_labeler_test.py, models-2.13.1/official/nlp/modeling/models/bert_token_classifier.py, models-2.13.1/official/nlp/modeling/models/bert_token_classifier_test.py, models-2.13.1/official/nlp/modeling/models/dual_encoder.py, models-2.13.1/official/nlp/modeling/models/dual_encoder_test.py, models-2.13.1/official/nlp/modeling/models/electra_pretrainer.py, models-2.13.1/official/nlp/modeling/models/electra_pretrainer_test.py, models-2.13.1/official/nlp/modeling/models/seq2seq_transformer.py, models-2.13.1/official/nlp/modeling/models/seq2seq_transformer_test.py, models-2.13.1/official/nlp/modeling/models/t5.py, models-2.13.1/official/nlp/modeling/models/t5_test.py, models-2.13.1/official/nlp/modeling/models/xlnet.py, models-2.13.1/official/nlp/modeling/models/xlnet_test.py, models-2.13.1/official/nlp/modeling/networks/README.md, models-2.13.1/official/nlp/modeling/networks/__init__.py, models-2.13.1/official/nlp/modeling/networks/albert_encoder.py, models-2.13.1/official/nlp/modeling/networks/albert_encoder_test.py, models-2.13.1/official/nlp/modeling/networks/bert_dense_encoder_test.py, models-2.13.1/official/nlp/modeling/networks/bert_encoder.py, models-2.13.1/official/nlp/modeling/networks/bert_encoder_test.py, models-2.13.1/official/nlp/modeling/networks/classification.py, models-2.13.1/official/nlp/modeling/networks/classification_test.py, models-2.13.1/official/nlp/modeling/networks/encoder_scaffold.py, models-2.13.1/official/nlp/modeling/networks/encoder_scaffold_test.py, models-2.13.1/official/nlp/modeling/networks/fnet.py, models-2.13.1/official/nlp/modeling/networks/fnet_test.py, models-2.13.1/official/nlp/modeling/networks/funnel_transformer.py, models-2.13.1/official/nlp/modeling/networks/funnel_transformer_test.py, models-2.13.1/official/nlp/modeling/networks/mobile_bert_encoder.py, models-2.13.1/official/nlp/modeling/networks/mobile_bert_encoder_test.py, 
models-2.13.1/official/nlp/modeling/networks/packed_sequence_embedding.py, models-2.13.1/official/nlp/modeling/networks/packed_sequence_embedding_test.py, models-2.13.1/official/nlp/modeling/networks/span_labeling.py, models-2.13.1/official/nlp/modeling/networks/span_labeling_test.py, models-2.13.1/official/nlp/modeling/networks/sparse_mixer.py, models-2.13.1/official/nlp/modeling/networks/sparse_mixer_test.py, models-2.13.1/official/nlp/modeling/networks/xlnet_base.py, models-2.13.1/official/nlp/modeling/networks/xlnet_base_test.py, models-2.13.1/official/nlp/modeling/ops/__init__.py, models-2.13.1/official/nlp/modeling/ops/beam_search.py, models-2.13.1/official/nlp/modeling/ops/beam_search_test.py, models-2.13.1/official/nlp/modeling/ops/decoding_module.py, models-2.13.1/official/nlp/modeling/ops/decoding_module_test.py, models-2.13.1/official/nlp/modeling/ops/sampling_module.py, models-2.13.1/official/nlp/modeling/ops/segment_extractor.py, models-2.13.1/official/nlp/modeling/ops/segment_extractor_test.py, models-2.13.1/official/nlp/modeling/README.md, models-2.13.1/official/nlp/modeling/__init__.py, models-2.13.1/official/nlp/serving/__init__.py, models-2.13.1/official/nlp/serving/export_savedmodel.py, models-2.13.1/official/nlp/serving/export_savedmodel_test.py, models-2.13.1/official/nlp/serving/export_savedmodel_util.py, models-2.13.1/official/nlp/serving/serving_modules.py, models-2.13.1/official/nlp/serving/serving_modules_test.py, models-2.13.1/official/nlp/tasks/__init__.py, models-2.13.1/official/nlp/tasks/dual_encoder.py, models-2.13.1/official/nlp/tasks/dual_encoder_test.py, models-2.13.1/official/nlp/tasks/electra_task.py, models-2.13.1/official/nlp/tasks/electra_task_test.py, models-2.13.1/official/nlp/tasks/masked_lm.py, models-2.13.1/official/nlp/tasks/masked_lm_determinism_test.py, models-2.13.1/official/nlp/tasks/masked_lm_test.py, models-2.13.1/official/nlp/tasks/question_answering.py, models-2.13.1/official/nlp/tasks/question_answering_test.py, models-2.13.1/official/nlp/tasks/sentence_prediction.py, models-2.13.1/official/nlp/tasks/sentence_prediction_test.py, models-2.13.1/official/nlp/tasks/tagging.py, models-2.13.1/official/nlp/tasks/tagging_test.py, models-2.13.1/official/nlp/tasks/translation.py, models-2.13.1/official/nlp/tasks/translation_test.py, models-2.13.1/official/nlp/tasks/utils.py, models-2.13.1/official/nlp/tools/__init__.py, models-2.13.1/official/nlp/tools/export_tfhub.py, models-2.13.1/official/nlp/tools/export_tfhub_lib.py, models-2.13.1/official/nlp/tools/export_tfhub_lib_test.py, models-2.13.1/official/nlp/tools/squad_evaluate_v1_1.py, models-2.13.1/official/nlp/tools/squad_evaluate_v2_0.py, models-2.13.1/official/nlp/tools/tf1_bert_checkpoint_converter_lib.py, models-2.13.1/official/nlp/tools/tf2_albert_encoder_checkpoint_converter.py, models-2.13.1/official/nlp/tools/tf2_bert_encoder_checkpoint_converter.py, models-2.13.1/official/nlp/tools/tokenization.py, models-2.13.1/official/nlp/tools/tokenization_test.py, models-2.13.1/official/nlp/MODEL_GARDEN.md, models-2.13.1/official/nlp/README.md, models-2.13.1/official/nlp/__init__.py, models-2.13.1/official/nlp/continuous_finetune_lib.py, models-2.13.1/official/nlp/continuous_finetune_lib_test.py, models-2.13.1/official/nlp/optimization.py, models-2.13.1/official/nlp/train.py, models-2.13.1/official/pip_package/setup.py, models-2.13.1/official/projects/assemblenet/configs/assemblenet.py, models-2.13.1/official/projects/assemblenet/configs/assemblenet_test.py, 
models-2.13.1/official/projects/assemblenet/experiment/ucf101_assemblenet_plus_tpu.yaml, models-2.13.1/official/projects/assemblenet/experiment/ucf101_assemblenet_tpu.yaml, models-2.13.1/official/projects/assemblenet/modeling/assemblenet.py, models-2.13.1/official/projects/assemblenet/modeling/assemblenet_plus.py, models-2.13.1/official/projects/assemblenet/modeling/assemblenet_plus_test.py, models-2.13.1/official/projects/assemblenet/modeling/rep_flow_2d_layer.py, models-2.13.1/official/projects/assemblenet/README.md, models-2.13.1/official/projects/assemblenet/train.py, models-2.13.1/official/projects/assemblenet/train_test.py, models-2.13.1/official/projects/backbone_reuse/configs/experiments/faster_rcnn/fastrcnn_resnet101_fpn_600epochs.yaml, models-2.13.1/official/projects/backbone_reuse/configs/experiments/faster_rcnn/fastrcnn_resnet101_fpn_72epochs.yaml, models-2.13.1/official/projects/backbone_reuse/configs/experiments/faster_rcnn/fastrcnn_resnet101_fpn_cascade_600epochs.yaml, models-2.13.1/official/projects/backbone_reuse/configs/experiments/faster_rcnn/fastrcnn_resnet101_fpn_cascade_72epochs.yaml, models-2.13.1/official/projects/backbone_reuse/configs/experiments/faster_rcnn/fastrcnn_resnet101_nasfpn_600epochs.yaml, models-2.13.1/official/projects/backbone_reuse/configs/experiments/faster_rcnn/fastrcnn_resnet101_nasfpn_72epochs.yaml, models-2.13.1/official/projects/backbone_reuse/configs/experiments/faster_rcnn/fastrcnn_resnet101_nasfpn_cascade_600epochs.yaml, models-2.13.1/official/projects/backbone_reuse/configs/experiments/faster_rcnn/fastrcnn_resnet101_nasfpn_cascade_72epochs.yaml, models-2.13.1/official/projects/backbone_reuse/configs/experiments/retinanet/retinanet_resnet101_fpn_600epochs.yaml, models-2.13.1/official/projects/backbone_reuse/configs/experiments/retinanet/retinanet_resnet101_fpn_72epochs.yaml, models-2.13.1/official/projects/backbone_reuse/configs/experiments/retinanet/retinanet_resnet101_nasfpn_600epochs.yaml, models-2.13.1/official/projects/backbone_reuse/configs/experiments/retinanet/retinanet_resnet101_nasfpn_72epochs.yaml, models-2.13.1/official/projects/backbone_reuse/README.md, models-2.13.1/official/projects/basnet/configs/experiments/basnet_dut_gpu.yaml, models-2.13.1/official/projects/basnet/configs/basnet.py, models-2.13.1/official/projects/basnet/configs/basnet_test.py, models-2.13.1/official/projects/basnet/evaluation/metrics.py, models-2.13.1/official/projects/basnet/evaluation/metrics_test.py, models-2.13.1/official/projects/basnet/losses/basnet_losses.py, models-2.13.1/official/projects/basnet/modeling/basnet_model.py, models-2.13.1/official/projects/basnet/modeling/basnet_model_test.py, models-2.13.1/official/projects/basnet/modeling/nn_blocks.py, models-2.13.1/official/projects/basnet/modeling/refunet.py, models-2.13.1/official/projects/basnet/serving/basnet.py, models-2.13.1/official/projects/basnet/serving/export_saved_model.py, models-2.13.1/official/projects/basnet/tasks/basnet.py, models-2.13.1/official/projects/basnet/README.md, models-2.13.1/official/projects/basnet/train.py, models-2.13.1/official/projects/bigbird/experiments/glue_mnli_matched.yaml, models-2.13.1/official/projects/bigbird/experiments/squad_v1.yaml, models-2.13.1/official/projects/bigbird/README.md, models-2.13.1/official/projects/bigbird/__init__.py, models-2.13.1/official/projects/bigbird/encoder.py, models-2.13.1/official/projects/bigbird/encoder_test.py, models-2.13.1/official/projects/bigbird/experiment_configs.py, 
models-2.13.1/official/projects/bigbird/recompute_grad.py, models-2.13.1/official/projects/bigbird/recomputing_dropout.py, models-2.13.1/official/projects/bigbird/stateless_dropout.py, models-2.13.1/official/projects/centernet/common/__init__.py, models-2.13.1/official/projects/centernet/common/registry_imports.py, models-2.13.1/official/projects/centernet/configs/experiments/coco-centernet-hourglass-gpu.yaml, models-2.13.1/official/projects/centernet/configs/experiments/coco-centernet-hourglass-tpu.yaml, models-2.13.1/official/projects/centernet/configs/__init__.py, models-2.13.1/official/projects/centernet/configs/backbones.py, models-2.13.1/official/projects/centernet/configs/centernet.py, models-2.13.1/official/projects/centernet/configs/centernet_test.py, models-2.13.1/official/projects/centernet/dataloaders/__init__.py, models-2.13.1/official/projects/centernet/dataloaders/centernet_input.py, models-2.13.1/official/projects/centernet/losses/__init__.py, models-2.13.1/official/projects/centernet/losses/centernet_losses.py, models-2.13.1/official/projects/centernet/losses/centernet_losses_test.py, models-2.13.1/official/projects/centernet/modeling/backbones/__init__.py, models-2.13.1/official/projects/centernet/modeling/backbones/hourglass.py, models-2.13.1/official/projects/centernet/modeling/backbones/hourglass_test.py, models-2.13.1/official/projects/centernet/modeling/heads/__init__.py, models-2.13.1/official/projects/centernet/modeling/heads/centernet_head.py, models-2.13.1/official/projects/centernet/modeling/heads/centernet_head_test.py, models-2.13.1/official/projects/centernet/modeling/layers/__init__.py, models-2.13.1/official/projects/centernet/modeling/layers/cn_nn_blocks.py, models-2.13.1/official/projects/centernet/modeling/layers/cn_nn_blocks_test.py, models-2.13.1/official/projects/centernet/modeling/layers/detection_generator.py, models-2.13.1/official/projects/centernet/modeling/__init__.py, models-2.13.1/official/projects/centernet/modeling/centernet_model.py, models-2.13.1/official/projects/centernet/modeling/centernet_model_test.py, models-2.13.1/official/projects/centernet/ops/__init__.py, models-2.13.1/official/projects/centernet/ops/box_list.py, models-2.13.1/official/projects/centernet/ops/box_list_ops.py, models-2.13.1/official/projects/centernet/ops/loss_ops.py, models-2.13.1/official/projects/centernet/ops/nms_ops.py, models-2.13.1/official/projects/centernet/ops/preprocess_ops.py, models-2.13.1/official/projects/centernet/ops/target_assigner.py, models-2.13.1/official/projects/centernet/ops/target_assigner_test.py, models-2.13.1/official/projects/centernet/tasks/__init__.py, models-2.13.1/official/projects/centernet/tasks/centernet.py, models-2.13.1/official/projects/centernet/utils/checkpoints/__init__.py, models-2.13.1/official/projects/centernet/utils/checkpoints/config_classes.py, models-2.13.1/official/projects/centernet/utils/checkpoints/config_data.py, models-2.13.1/official/projects/centernet/utils/checkpoints/load_weights.py, models-2.13.1/official/projects/centernet/utils/checkpoints/read_checkpoints.py, models-2.13.1/official/projects/centernet/utils/__init__.py, models-2.13.1/official/projects/centernet/utils/tf2_centernet_checkpoint_converter.py, models-2.13.1/official/projects/centernet/README.md, models-2.13.1/official/projects/centernet/__init__.py, models-2.13.1/official/projects/centernet/train.py, models-2.13.1/official/projects/const_cl/README.md, models-2.13.1/official/projects/cots_detector/README.md, 
models-2.13.1/official/projects/cots_detector/crown_of_thorns_starfish_detection_pipeline.ipynb, models-2.13.1/official/projects/deepmac_maskrcnn/common/__init__.py, models-2.13.1/official/projects/deepmac_maskrcnn/common/registry_imports.py, models-2.13.1/official/projects/deepmac_maskrcnn/configs/experiments/deep_mask_head_rcnn_nonvoc_spinenet143_hg52.yaml, models-2.13.1/official/projects/deepmac_maskrcnn/configs/experiments/deep_mask_head_rcnn_voc_r101_hg52.yaml, models-2.13.1/official/projects/deepmac_maskrcnn/configs/experiments/deep_mask_head_rcnn_voc_r50.yaml, models-2.13.1/official/projects/deepmac_maskrcnn/configs/experiments/deep_mask_head_rcnn_voc_r50_hg52.yaml, models-2.13.1/official/projects/deepmac_maskrcnn/configs/experiments/deep_mask_head_rcnn_voc_spinenet143_hg52.yaml, models-2.13.1/official/projects/deepmac_maskrcnn/configs/__init__.py, models-2.13.1/official/projects/deepmac_maskrcnn/configs/deep_mask_head_rcnn.py, models-2.13.1/official/projects/deepmac_maskrcnn/configs/deep_mask_head_rcnn_config_test.py, models-2.13.1/official/projects/deepmac_maskrcnn/modeling/heads/__init__.py, models-2.13.1/official/projects/deepmac_maskrcnn/modeling/heads/hourglass_network.py, models-2.13.1/official/projects/deepmac_maskrcnn/modeling/heads/instance_heads.py, models-2.13.1/official/projects/deepmac_maskrcnn/modeling/heads/instance_heads_test.py, models-2.13.1/official/projects/deepmac_maskrcnn/modeling/__init__.py, models-2.13.1/official/projects/deepmac_maskrcnn/modeling/maskrcnn_model.py, models-2.13.1/official/projects/deepmac_maskrcnn/modeling/maskrcnn_model_test.py, models-2.13.1/official/projects/deepmac_maskrcnn/serving/__init__.py, models-2.13.1/official/projects/deepmac_maskrcnn/serving/detection.py, models-2.13.1/official/projects/deepmac_maskrcnn/serving/detection_test.py, models-2.13.1/official/projects/deepmac_maskrcnn/serving/export_saved_model.py, models-2.13.1/official/projects/deepmac_maskrcnn/tasks/__init__.py, models-2.13.1/official/projects/deepmac_maskrcnn/tasks/deep_mask_head_rcnn.py, models-2.13.1/official/projects/deepmac_maskrcnn/README.md, models-2.13.1/official/projects/deepmac_maskrcnn/__init__.py, models-2.13.1/official/projects/deepmac_maskrcnn/train.py, models-2.13.1/official/projects/detr/configs/detr.py, models-2.13.1/official/projects/detr/configs/detr_test.py, models-2.13.1/official/projects/detr/dataloaders/coco.py, models-2.13.1/official/projects/detr/dataloaders/coco_test.py, models-2.13.1/official/projects/detr/dataloaders/detr_input.py, models-2.13.1/official/projects/detr/experiments/detr_r50_300epochs.sh, models-2.13.1/official/projects/detr/experiments/detr_r50_500epochs.sh, models-2.13.1/official/projects/detr/modeling/detr.py, models-2.13.1/official/projects/detr/modeling/detr_test.py, models-2.13.1/official/projects/detr/modeling/transformer.py, models-2.13.1/official/projects/detr/modeling/transformer_test.py, models-2.13.1/official/projects/detr/ops/matchers.py, models-2.13.1/official/projects/detr/ops/matchers_test.py, models-2.13.1/official/projects/detr/serving/export_module.py, models-2.13.1/official/projects/detr/serving/export_module_test.py, models-2.13.1/official/projects/detr/serving/export_saved_model.py, models-2.13.1/official/projects/detr/tasks/detection.py, models-2.13.1/official/projects/detr/tasks/detection_test.py, models-2.13.1/official/projects/detr/README.md, models-2.13.1/official/projects/detr/optimization.py, models-2.13.1/official/projects/detr/train.py, 
models-2.13.1/official/projects/edgetpu/nlp/configs/__init__.py, models-2.13.1/official/projects/edgetpu/nlp/configs/params.py, models-2.13.1/official/projects/edgetpu/nlp/experiments/downstream_tasks/glue_mnli.yaml, models-2.13.1/official/projects/edgetpu/nlp/experiments/downstream_tasks/mobilebert_baseline.yaml, models-2.13.1/official/projects/edgetpu/nlp/experiments/downstream_tasks/mobilebert_edgetpu_m.yaml, models-2.13.1/official/projects/edgetpu/nlp/experiments/downstream_tasks/mobilebert_edgetpu_s.yaml, models-2.13.1/official/projects/edgetpu/nlp/experiments/downstream_tasks/mobilebert_edgetpu_xs.yaml, models-2.13.1/official/projects/edgetpu/nlp/experiments/downstream_tasks/mobilebert_edgetpu_xxs.yaml, models-2.13.1/official/projects/edgetpu/nlp/experiments/downstream_tasks/squad_v1.yaml, models-2.13.1/official/projects/edgetpu/nlp/experiments/mobilebert_baseline.yaml, models-2.13.1/official/projects/edgetpu/nlp/experiments/mobilebert_edgetpu_m.yaml, models-2.13.1/official/projects/edgetpu/nlp/experiments/mobilebert_edgetpu_s.yaml, models-2.13.1/official/projects/edgetpu/nlp/experiments/mobilebert_edgetpu_xs.yaml, models-2.13.1/official/projects/edgetpu/nlp/experiments/mobilebert_edgetpu_xxs.yaml, models-2.13.1/official/projects/edgetpu/nlp/modeling/__init__.py, models-2.13.1/official/projects/edgetpu/nlp/modeling/edgetpu_layers.py, models-2.13.1/official/projects/edgetpu/nlp/modeling/edgetpu_layers_test.py, models-2.13.1/official/projects/edgetpu/nlp/modeling/encoder.py, models-2.13.1/official/projects/edgetpu/nlp/modeling/model_builder.py, models-2.13.1/official/projects/edgetpu/nlp/modeling/model_builder_test.py, models-2.13.1/official/projects/edgetpu/nlp/modeling/pretrainer.py, models-2.13.1/official/projects/edgetpu/nlp/modeling/pretrainer_test.py, models-2.13.1/official/projects/edgetpu/nlp/serving/__init__.py, models-2.13.1/official/projects/edgetpu/nlp/serving/export_tflite_squad.py, models-2.13.1/official/projects/edgetpu/nlp/serving/export_tflite_squad_test.py, models-2.13.1/official/projects/edgetpu/nlp/utils/__init__.py, models-2.13.1/official/projects/edgetpu/nlp/utils/utils.py, models-2.13.1/official/projects/edgetpu/nlp/utils/utils_test.py, models-2.13.1/official/projects/edgetpu/nlp/README.md, models-2.13.1/official/projects/edgetpu/nlp/__init__.py, models-2.13.1/official/projects/edgetpu/nlp/mobilebert_edgetpu_trainer.py, models-2.13.1/official/projects/edgetpu/nlp/mobilebert_edgetpu_trainer_test.py, models-2.13.1/official/projects/edgetpu/nlp/run_mobilebert_edgetpu_train.py, models-2.13.1/official/projects/edgetpu/vision/configs/__init__.py, models-2.13.1/official/projects/edgetpu/vision/configs/mobilenet_edgetpu_config.py, models-2.13.1/official/projects/edgetpu/vision/configs/semantic_segmentation_config.py, models-2.13.1/official/projects/edgetpu/vision/configs/semantic_segmentation_searched_config.py, models-2.13.1/official/projects/edgetpu/vision/dataloaders/__init__.py, models-2.13.1/official/projects/edgetpu/vision/dataloaders/classification_input.py, models-2.13.1/official/projects/edgetpu/vision/dataloaders/classification_input_test.py, models-2.13.1/official/projects/edgetpu/vision/modeling/backbones/__init__.py, models-2.13.1/official/projects/edgetpu/vision/modeling/backbones/mobilenet_edgetpu.py, models-2.13.1/official/projects/edgetpu/vision/modeling/backbones/mobilenet_edgetpu_test.py, models-2.13.1/official/projects/edgetpu/vision/modeling/heads/__init__.py, models-2.13.1/official/projects/edgetpu/vision/modeling/heads/bifpn_head.py, 
models-2.13.1/official/projects/edgetpu/vision/modeling/__init__.py, models-2.13.1/official/projects/edgetpu/vision/modeling/common_modules.py, models-2.13.1/official/projects/edgetpu/vision/modeling/custom_layers.py, models-2.13.1/official/projects/edgetpu/vision/modeling/custom_layers_test.py, models-2.13.1/official/projects/edgetpu/vision/modeling/mobilenet_edgetpu_v1_model.py, models-2.13.1/official/projects/edgetpu/vision/modeling/mobilenet_edgetpu_v1_model_blocks.py, models-2.13.1/official/projects/edgetpu/vision/modeling/mobilenet_edgetpu_v1_model_test.py, models-2.13.1/official/projects/edgetpu/vision/modeling/mobilenet_edgetpu_v2_model.py, models-2.13.1/official/projects/edgetpu/vision/modeling/mobilenet_edgetpu_v2_model_blocks.py, models-2.13.1/official/projects/edgetpu/vision/modeling/mobilenet_edgetpu_v2_model_blocks_test.py, models-2.13.1/official/projects/edgetpu/vision/modeling/mobilenet_edgetpu_v2_model_test.py, models-2.13.1/official/projects/edgetpu/vision/modeling/optimized_multiheadattention_layer.py, models-2.13.1/official/projects/edgetpu/vision/modeling/optimized_multiheadattention_layer_test.py, models-2.13.1/official/projects/edgetpu/vision/serving/testdata/ADE_val_00000557.jpg, models-2.13.1/official/projects/edgetpu/vision/serving/testdata/ADE_val_00001471.jpg, models-2.13.1/official/projects/edgetpu/vision/serving/testdata/ADE_val_00001626.jpg, models-2.13.1/official/projects/edgetpu/vision/serving/__init__.py, models-2.13.1/official/projects/edgetpu/vision/serving/export_tflite.py, models-2.13.1/official/projects/edgetpu/vision/serving/export_tflite_test.py, models-2.13.1/official/projects/edgetpu/vision/serving/export_util.py, models-2.13.1/official/projects/edgetpu/vision/serving/inference_visualization_tool.ipynb, models-2.13.1/official/projects/edgetpu/vision/serving/tflite_imagenet_evaluator.py, models-2.13.1/official/projects/edgetpu/vision/serving/tflite_imagenet_evaluator_run.py, models-2.13.1/official/projects/edgetpu/vision/serving/tflite_imagenet_evaluator_test.py, models-2.13.1/official/projects/edgetpu/vision/tasks/__init__.py, models-2.13.1/official/projects/edgetpu/vision/tasks/image_classification.py, models-2.13.1/official/projects/edgetpu/vision/tasks/image_classification_test.py, models-2.13.1/official/projects/edgetpu/vision/tasks/semantic_segmentation.py, models-2.13.1/official/projects/edgetpu/vision/tasks/semantic_segmentation_test.py, models-2.13.1/official/projects/edgetpu/vision/README.md, models-2.13.1/official/projects/edgetpu/vision/__init__.py, models-2.13.1/official/projects/edgetpu/vision/train.py, models-2.13.1/official/projects/edgetpu/README.md, models-2.13.1/official/projects/fffner/experiments/base_conll2003.yaml, models-2.13.1/official/projects/fffner/experiments/base_restaurants.yaml, models-2.13.1/official/projects/fffner/utils/convert_checkpoint_huggingface.py, models-2.13.1/official/projects/fffner/utils/convert_checkpoint_tensorflow.py, models-2.13.1/official/projects/fffner/utils/create_data.py, models-2.13.1/official/projects/fffner/README.md, models-2.13.1/official/projects/fffner/fffner.py, models-2.13.1/official/projects/fffner/fffner_classifier.py, models-2.13.1/official/projects/fffner/fffner_dataloader.py, models-2.13.1/official/projects/fffner/fffner_encoder.py, models-2.13.1/official/projects/fffner/fffner_encoder_test.py, models-2.13.1/official/projects/fffner/fffner_experiments.py, models-2.13.1/official/projects/fffner/fffner_prediction.py, models-2.13.1/official/projects/fffner/train.py, 
models-2.13.1/official/projects/labse/experiments/labse_base.yaml, models-2.13.1/official/projects/labse/experiments/labse_bert_base.yaml, models-2.13.1/official/projects/labse/README.md, models-2.13.1/official/projects/labse/config_labse.py, models-2.13.1/official/projects/labse/export_tfhub.py, models-2.13.1/official/projects/labse/export_tfhub_test.py, models-2.13.1/official/projects/labse/train.py, models-2.13.1/official/projects/longformer/experiments/glue_mnli.yaml, models-2.13.1/official/projects/longformer/experiments/glue_mnli_allenai.yaml, models-2.13.1/official/projects/longformer/experiments/pretraining_512.yaml, models-2.13.1/official/projects/longformer/utils/convert_pretrained_pytorch_checkpoint_to_tf.py, models-2.13.1/official/projects/longformer/utils/longformer_tokenizer_to_tfrecord.py, models-2.13.1/official/projects/longformer/README.md, models-2.13.1/official/projects/longformer/longformer.py, models-2.13.1/official/projects/longformer/longformer_attention.py, models-2.13.1/official/projects/longformer/longformer_attention_test.py, models-2.13.1/official/projects/longformer/longformer_encoder.py, models-2.13.1/official/projects/longformer/longformer_encoder_block.py, models-2.13.1/official/projects/longformer/longformer_encoder_test.py, models-2.13.1/official/projects/longformer/longformer_experiments.py, models-2.13.1/official/projects/longformer/train.py, models-2.13.1/official/projects/lra/experiments/lra_aan.yaml, models-2.13.1/official/projects/lra/experiments/lra_aan_linformer.yaml, models-2.13.1/official/projects/lra/experiments/lra_cifar.yaml, models-2.13.1/official/projects/lra/experiments/lra_cifar_linformer.yaml, models-2.13.1/official/projects/lra/experiments/lra_imdb.yaml, models-2.13.1/official/projects/lra/experiments/lra_imdb_linformer.yaml, models-2.13.1/official/projects/lra/experiments/lra_listops.yaml, models-2.13.1/official/projects/lra/experiments/lra_listops_linformer.yaml, models-2.13.1/official/projects/lra/experiments/lra_pathfinder.yaml, models-2.13.1/official/projects/lra/experiments/lra_pathfinder_linformer.yaml, models-2.13.1/official/projects/lra/README.md, models-2.13.1/official/projects/lra/linformer.py, models-2.13.1/official/projects/lra/linformer_encoder.py, models-2.13.1/official/projects/lra/linformer_encoder_block.py, models-2.13.1/official/projects/lra/linformer_experiments.py, models-2.13.1/official/projects/lra/lra_dual_encoder.py, models-2.13.1/official/projects/lra/lra_dual_encoder_dataloader.py, models-2.13.1/official/projects/lra/lra_dual_encoder_task.py, models-2.13.1/official/projects/lra/train.py, models-2.13.1/official/projects/lra/transformer.py, models-2.13.1/official/projects/lra/transformer_encoder.py, models-2.13.1/official/projects/lra/transformer_experiments.py, models-2.13.1/official/projects/mae/configs/linear_probe.py, models-2.13.1/official/projects/mae/configs/mae.py, models-2.13.1/official/projects/mae/configs/vit.py, models-2.13.1/official/projects/mae/modeling/masked_ae.py, models-2.13.1/official/projects/mae/modeling/utils.py, models-2.13.1/official/projects/mae/modeling/vit.py, models-2.13.1/official/projects/mae/tasks/image_classification.py, models-2.13.1/official/projects/mae/tasks/image_classification_test.py, models-2.13.1/official/projects/mae/tasks/linear_probe.py, models-2.13.1/official/projects/mae/tasks/linear_probe_test.py, models-2.13.1/official/projects/mae/tasks/masked_ae.py, models-2.13.1/official/projects/mae/tasks/masked_ae_test.py, models-2.13.1/official/projects/mae/README.md, 
models-2.13.1/official/projects/mae/optimization.py, models-2.13.1/official/projects/mae/train.py, models-2.13.1/official/projects/maxvit/configs/experiments/coco_maxvitb_i640_crcnn.yaml, models-2.13.1/official/projects/maxvit/configs/experiments/coco_maxvitb_i896_crcnn.yaml, models-2.13.1/official/projects/maxvit/configs/experiments/coco_maxvitl_i896_crcnn.yaml, models-2.13.1/official/projects/maxvit/configs/experiments/coco_maxvits_i896_crcnn.yaml, models-2.13.1/official/projects/maxvit/configs/experiments/coco_maxvitt_i640_crcnn.yaml, models-2.13.1/official/projects/maxvit/configs/experiments/coco_maxvitxl_i896_crcnn.yaml, models-2.13.1/official/projects/maxvit/configs/experiments/finetune_maxvitb_imagenet_i384.yaml, models-2.13.1/official/projects/maxvit/configs/experiments/finetune_maxvitb_imagenet_i512.yaml, models-2.13.1/official/projects/maxvit/configs/experiments/finetune_maxvitl_imagenet_i384.yaml, models-2.13.1/official/projects/maxvit/configs/experiments/finetune_maxvitl_imagenet_i512.yaml, models-2.13.1/official/projects/maxvit/configs/experiments/finetune_maxvitxl_imagenet_i384.yaml, models-2.13.1/official/projects/maxvit/configs/experiments/finetune_maxvitxl_imagenet_i512.yaml, models-2.13.1/official/projects/maxvit/configs/experiments/maxvit_base_imagenet.yaml, models-2.13.1/official/projects/maxvit/configs/experiments/maxvit_base_imagenet_gpu.yaml, models-2.13.1/official/projects/maxvit/configs/experiments/maxvit_large_imagenet.yaml, models-2.13.1/official/projects/maxvit/configs/experiments/maxvit_small_imagenet.yaml, models-2.13.1/official/projects/maxvit/configs/experiments/maxvit_small_imagenet_gpu.yaml, models-2.13.1/official/projects/maxvit/configs/experiments/maxvit_tiny_imagenet.yaml, models-2.13.1/official/projects/maxvit/configs/experiments/maxvit_xlarge_imagenet.yaml, models-2.13.1/official/projects/maxvit/configs/experiments/retinanet_maxvit_base_coco_i1280_tpu.yaml, models-2.13.1/official/projects/maxvit/configs/experiments/retinanet_maxvit_base_coco_i640_tpu.yaml, models-2.13.1/official/projects/maxvit/configs/experiments/seg_coco_maxvits_i640.yaml, models-2.13.1/official/projects/maxvit/configs/experiments/seg_pascal_maxvits_i512.yaml, models-2.13.1/official/projects/maxvit/configs/__init__.py, models-2.13.1/official/projects/maxvit/configs/backbones.py, models-2.13.1/official/projects/maxvit/configs/image_classification.py, models-2.13.1/official/projects/maxvit/configs/image_classification_test.py, models-2.13.1/official/projects/maxvit/configs/rcnn.py, models-2.13.1/official/projects/maxvit/configs/rcnn_test.py, models-2.13.1/official/projects/maxvit/configs/retinanet.py, models-2.13.1/official/projects/maxvit/configs/retinanet_test.py, models-2.13.1/official/projects/maxvit/configs/semantic_segmentation.py, models-2.13.1/official/projects/maxvit/configs/semantic_segmentation_test.py, models-2.13.1/official/projects/maxvit/docs/i21k_jft_results.png, models-2.13.1/official/projects/maxvit/docs/imagenet_results.png, models-2.13.1/official/projects/maxvit/docs/maxvit_arch.png, models-2.13.1/official/projects/maxvit/modeling/__init__.py, models-2.13.1/official/projects/maxvit/modeling/common_ops.py, models-2.13.1/official/projects/maxvit/modeling/layers.py, models-2.13.1/official/projects/maxvit/modeling/maxvit.py, models-2.13.1/official/projects/maxvit/modeling/maxvit_test.py, models-2.13.1/official/projects/maxvit/README.md, models-2.13.1/official/projects/maxvit/__init__.py, models-2.13.1/official/projects/maxvit/train.py, 
models-2.13.1/official/projects/maxvit/train_test.py, models-2.13.1/official/projects/mobilebert/experiments/en_uncased_student.yaml, models-2.13.1/official/projects/mobilebert/experiments/en_uncased_teacher.yaml, models-2.13.1/official/projects/mobilebert/experiments/mobilebert_distillation_en_uncased.yaml, models-2.13.1/official/projects/mobilebert/README.md, models-2.13.1/official/projects/mobilebert/__init__.py, models-2.13.1/official/projects/mobilebert/distillation.py, models-2.13.1/official/projects/mobilebert/distillation_test.py, models-2.13.1/official/projects/mobilebert/export_tfhub.py, models-2.13.1/official/projects/mobilebert/model_utils.py, models-2.13.1/official/projects/mobilebert/run_distillation.py, models-2.13.1/official/projects/mobilebert/tf2_model_checkpoint_converter.py, models-2.13.1/official/projects/mobilebert/utils.py, models-2.13.1/official/projects/mosaic/configs/experiments/mosaic_mnv35_cityscapes_tfds_tpu.yaml, models-2.13.1/official/projects/mosaic/configs/mosaic_config.py, models-2.13.1/official/projects/mosaic/modeling/mosaic_blocks.py, models-2.13.1/official/projects/mosaic/modeling/mosaic_blocks_test.py, models-2.13.1/official/projects/mosaic/modeling/mosaic_head.py, models-2.13.1/official/projects/mosaic/modeling/mosaic_head_test.py, models-2.13.1/official/projects/mosaic/modeling/mosaic_model.py, models-2.13.1/official/projects/mosaic/modeling/mosaic_model_test.py, models-2.13.1/official/projects/mosaic/qat/configs/experiments/semantic_segmentation/mosaic_mnv35_cityscapes_tfds_qat_tpu.yaml, models-2.13.1/official/projects/mosaic/qat/configs/mosaic_config.py, models-2.13.1/official/projects/mosaic/qat/configs/mosaic_config_test.py, models-2.13.1/official/projects/mosaic/qat/modeling/heads/mosaic_head.py, models-2.13.1/official/projects/mosaic/qat/modeling/heads/mosaic_head_test.py, models-2.13.1/official/projects/mosaic/qat/modeling/layers/nn_blocks.py, models-2.13.1/official/projects/mosaic/qat/modeling/layers/nn_blocks_test.py, models-2.13.1/official/projects/mosaic/qat/modeling/factory.py, models-2.13.1/official/projects/mosaic/qat/modeling/factory_test.py, models-2.13.1/official/projects/mosaic/qat/serving/export_module.py, models-2.13.1/official/projects/mosaic/qat/serving/export_saved_model.py, models-2.13.1/official/projects/mosaic/qat/serving/export_tflite.py, models-2.13.1/official/projects/mosaic/qat/tasks/mosaic_tasks.py, models-2.13.1/official/projects/mosaic/qat/tasks/mosaic_tasks_test.py, models-2.13.1/official/projects/mosaic/README.md, models-2.13.1/official/projects/mosaic/mosaic_tasks.py, models-2.13.1/official/projects/mosaic/mosaic_tasks_test.py, models-2.13.1/official/projects/mosaic/mosaic_tutorial.ipynb, models-2.13.1/official/projects/mosaic/registry_imports.py, models-2.13.1/official/projects/mosaic/train.py, models-2.13.1/official/projects/movinet/configs/yaml/movinet_a0_k600_8x8.yaml, models-2.13.1/official/projects/movinet/configs/yaml/movinet_a0_k600_cpu_local.yaml, models-2.13.1/official/projects/movinet/configs/yaml/movinet_a0_stream_k600_8x8.yaml, models-2.13.1/official/projects/movinet/configs/yaml/movinet_a1_k600_8x8.yaml, models-2.13.1/official/projects/movinet/configs/yaml/movinet_a1_stream_k600_8x8.yaml, models-2.13.1/official/projects/movinet/configs/yaml/movinet_a2_k600_8x8.yaml, models-2.13.1/official/projects/movinet/configs/yaml/movinet_a2_stream_k600_8x8.yaml, models-2.13.1/official/projects/movinet/configs/yaml/movinet_a3_k600_8x8.yaml, 
models-2.13.1/official/projects/movinet/configs/yaml/movinet_a3_stream_k600_8x8.yaml, models-2.13.1/official/projects/movinet/configs/yaml/movinet_a4_k600_8x8.yaml, models-2.13.1/official/projects/movinet/configs/yaml/movinet_a4_stream_k600_8x8.yaml, models-2.13.1/official/projects/movinet/configs/yaml/movinet_a5_k600_8x8.yaml, models-2.13.1/official/projects/movinet/configs/yaml/movinet_a5_stream_k600_8x8.yaml, models-2.13.1/official/projects/movinet/configs/yaml/movinet_t0_k600_8x8.yaml, models-2.13.1/official/projects/movinet/configs/yaml/movinet_t0_stream_k600_8x8.yaml, models-2.13.1/official/projects/movinet/configs/__init__.py, models-2.13.1/official/projects/movinet/configs/movinet.py, models-2.13.1/official/projects/movinet/configs/movinet_test.py, models-2.13.1/official/projects/movinet/files/jumpingjack.gif, models-2.13.1/official/projects/movinet/files/kinetics_600_labels.txt, models-2.13.1/official/projects/movinet/modeling/__init__.py, models-2.13.1/official/projects/movinet/modeling/movinet.py, models-2.13.1/official/projects/movinet/modeling/movinet_layers.py, models-2.13.1/official/projects/movinet/modeling/movinet_layers_test.py, models-2.13.1/official/projects/movinet/modeling/movinet_model.py, models-2.13.1/official/projects/movinet/modeling/movinet_model_test.py, models-2.13.1/official/projects/movinet/modeling/movinet_test.py, models-2.13.1/official/projects/movinet/tools/__init__.py, models-2.13.1/official/projects/movinet/tools/convert_3d_2plus1d.py, models-2.13.1/official/projects/movinet/tools/convert_3d_2plus1d_test.py, models-2.13.1/official/projects/movinet/tools/export_saved_model.py, models-2.13.1/official/projects/movinet/tools/export_saved_model_test.py, models-2.13.1/official/projects/movinet/tools/plot_movinet_video_stream_predictions.ipynb, models-2.13.1/official/projects/movinet/tools/quantize_movinet.py, models-2.13.1/official/projects/movinet/README.md, models-2.13.1/official/projects/movinet/__init__.py, models-2.13.1/official/projects/movinet/movinet_streaming_model_training_and_inference.ipynb, models-2.13.1/official/projects/movinet/movinet_tutorial.ipynb, models-2.13.1/official/projects/movinet/requirements.txt, models-2.13.1/official/projects/movinet/train.py, models-2.13.1/official/projects/movinet/train_test.py, models-2.13.1/official/projects/mtop/README.md, models-2.13.1/official/projects/nhnet/testdata/crawled_articles/domain_0.com/url_000.html, models-2.13.1/official/projects/nhnet/testdata/crawled_articles/domain_0.com/url_000.json, models-2.13.1/official/projects/nhnet/testdata/crawled_articles/domain_1.com/url_001.html, models-2.13.1/official/projects/nhnet/testdata/crawled_articles/domain_1.com/url_001.json, models-2.13.1/official/projects/nhnet/testdata/stories.json, models-2.13.1/official/projects/nhnet/testdata/vocab.txt, models-2.13.1/official/projects/nhnet/README.md, models-2.13.1/official/projects/nhnet/__init__.py, models-2.13.1/official/projects/nhnet/configs.py, models-2.13.1/official/projects/nhnet/configs_test.py, models-2.13.1/official/projects/nhnet/decoder.py, models-2.13.1/official/projects/nhnet/decoder_test.py, models-2.13.1/official/projects/nhnet/evaluation.py, models-2.13.1/official/projects/nhnet/input_pipeline.py, models-2.13.1/official/projects/nhnet/models.py, models-2.13.1/official/projects/nhnet/models_test.py, models-2.13.1/official/projects/nhnet/optimizer.py, models-2.13.1/official/projects/nhnet/raw_data_process.py, models-2.13.1/official/projects/nhnet/raw_data_processor.py, 
models-2.13.1/official/projects/nhnet/trainer.py, models-2.13.1/official/projects/nhnet/trainer_test.py, models-2.13.1/official/projects/nhnet/utils.py, models-2.13.1/official/projects/panoptic/configs/experiments/r50fpn_1x_coco.yaml, models-2.13.1/official/projects/panoptic/configs/experiments/r50fpn_3x_coco.yaml, models-2.13.1/official/projects/panoptic/configs/__init__.py, models-2.13.1/official/projects/panoptic/configs/panoptic_deeplab.py, models-2.13.1/official/projects/panoptic/configs/panoptic_maskrcnn.py, models-2.13.1/official/projects/panoptic/dataloaders/panoptic_deeplab_input.py, models-2.13.1/official/projects/panoptic/dataloaders/panoptic_maskrcnn_input.py, models-2.13.1/official/projects/panoptic/losses/panoptic_deeplab_losses.py, models-2.13.1/official/projects/panoptic/modeling/heads/panoptic_deeplab_heads.py, models-2.13.1/official/projects/panoptic/modeling/layers/fusion_layers.py, models-2.13.1/official/projects/panoptic/modeling/layers/panoptic_deeplab_merge.py, models-2.13.1/official/projects/panoptic/modeling/layers/panoptic_segmentation_generator.py, models-2.13.1/official/projects/panoptic/modeling/layers/paste_masks.py, models-2.13.1/official/projects/panoptic/modeling/factory.py, models-2.13.1/official/projects/panoptic/modeling/panoptic_deeplab_model.py, models-2.13.1/official/projects/panoptic/modeling/panoptic_maskrcnn_model.py, models-2.13.1/official/projects/panoptic/ops/mask_ops.py, models-2.13.1/official/projects/panoptic/serving/export_saved_model.py, models-2.13.1/official/projects/panoptic/serving/panoptic_deeplab.py, models-2.13.1/official/projects/panoptic/serving/panoptic_maskrcnn.py, models-2.13.1/official/projects/panoptic/tasks/__init__.py, models-2.13.1/official/projects/panoptic/tasks/panoptic_deeplab.py, models-2.13.1/official/projects/panoptic/tasks/panoptic_maskrcnn.py, models-2.13.1/official/projects/panoptic/README.md, models-2.13.1/official/projects/panoptic/__init__.py, models-2.13.1/official/projects/panoptic/train.py, models-2.13.1/official/projects/perceiver/configs/experiments/glue_cola.yaml, models-2.13.1/official/projects/perceiver/configs/experiments/glue_mnli_m.yaml, models-2.13.1/official/projects/perceiver/configs/experiments/glue_mnli_mm.yaml, models-2.13.1/official/projects/perceiver/configs/experiments/glue_mrpc.yaml, models-2.13.1/official/projects/perceiver/configs/experiments/glue_qnli.yaml, models-2.13.1/official/projects/perceiver/configs/experiments/glue_qqp.yaml, models-2.13.1/official/projects/perceiver/configs/experiments/glue_rte.yaml, models-2.13.1/official/projects/perceiver/configs/experiments/glue_sst.yaml, models-2.13.1/official/projects/perceiver/configs/experiments/glue_stsb.yaml, models-2.13.1/official/projects/perceiver/configs/experiments/vizier_config_glue.pbtxt, models-2.13.1/official/projects/perceiver/configs/experiments/wiki_books_pretrain.yaml, models-2.13.1/official/projects/perceiver/configs/encoders.py, models-2.13.1/official/projects/perceiver/configs/perceiver.py, models-2.13.1/official/projects/perceiver/configs/perceiver_test.py, models-2.13.1/official/projects/perceiver/modeling/layers/decoder.py, models-2.13.1/official/projects/perceiver/modeling/layers/decoder_test.py, models-2.13.1/official/projects/perceiver/modeling/layers/encoder.py, models-2.13.1/official/projects/perceiver/modeling/layers/encoder_test.py, models-2.13.1/official/projects/perceiver/modeling/layers/utils.py, models-2.13.1/official/projects/perceiver/modeling/layers/utils_test.py, 
models-2.13.1/official/projects/perceiver/modeling/models/classifier.py, models-2.13.1/official/projects/perceiver/modeling/models/classifier_test.py, models-2.13.1/official/projects/perceiver/modeling/models/pretrainer.py, models-2.13.1/official/projects/perceiver/modeling/models/pretrainer_test.py, models-2.13.1/official/projects/perceiver/modeling/networks/positional_decoder.py, models-2.13.1/official/projects/perceiver/modeling/networks/positional_decoder_test.py, models-2.13.1/official/projects/perceiver/modeling/networks/sequence_encoder.py, models-2.13.1/official/projects/perceiver/modeling/networks/sequence_encoder_test.py, models-2.13.1/official/projects/perceiver/tasks/pretrain.py, models-2.13.1/official/projects/perceiver/tasks/pretrain_test.py, models-2.13.1/official/projects/perceiver/tasks/sentence_prediction.py, models-2.13.1/official/projects/perceiver/tasks/sentence_prediction_test.py, models-2.13.1/official/projects/perceiver/README.md, models-2.13.1/official/projects/perceiver/train.py, models-2.13.1/official/projects/pix2seq/configs/pix2seq.py, models-2.13.1/official/projects/pix2seq/configs/pix2seq_test.py, models-2.13.1/official/projects/pix2seq/dataloaders/pix2seq_input.py, models-2.13.1/official/projects/pix2seq/dataloaders/pix2seq_input_test.py, models-2.13.1/official/projects/pix2seq/modeling/pix2seq_model.py, models-2.13.1/official/projects/pix2seq/modeling/pix2seq_model_test.py, models-2.13.1/official/projects/pix2seq/modeling/transformer.py, models-2.13.1/official/projects/pix2seq/modeling/transformer_test.py, models-2.13.1/official/projects/pix2seq/tasks/pix2seq_task.py, models-2.13.1/official/projects/pix2seq/README.md, models-2.13.1/official/projects/pix2seq/train.py, models-2.13.1/official/projects/pix2seq/utils.py, models-2.13.1/official/projects/pointpillars/configs/vehicle/pointpillars_3d_baseline_gpu.yaml, models-2.13.1/official/projects/pointpillars/configs/vehicle/pointpillars_3d_baseline_local.yaml, models-2.13.1/official/projects/pointpillars/configs/vehicle/pointpillars_3d_baseline_tpu.yaml, models-2.13.1/official/projects/pointpillars/configs/pointpillars.py, models-2.13.1/official/projects/pointpillars/configs/pointpillars_test.py, models-2.13.1/official/projects/pointpillars/dataloaders/decoders.py, models-2.13.1/official/projects/pointpillars/dataloaders/decoders_test.py, models-2.13.1/official/projects/pointpillars/dataloaders/parsers.py, models-2.13.1/official/projects/pointpillars/dataloaders/parsers_test.py, models-2.13.1/official/projects/pointpillars/modeling/backbones.py, models-2.13.1/official/projects/pointpillars/modeling/backbones_test.py, models-2.13.1/official/projects/pointpillars/modeling/decoders.py, models-2.13.1/official/projects/pointpillars/modeling/decoders_test.py, models-2.13.1/official/projects/pointpillars/modeling/factory.py, models-2.13.1/official/projects/pointpillars/modeling/factory_test.py, models-2.13.1/official/projects/pointpillars/modeling/featurizers.py, models-2.13.1/official/projects/pointpillars/modeling/featurizers_test.py, models-2.13.1/official/projects/pointpillars/modeling/heads.py, models-2.13.1/official/projects/pointpillars/modeling/heads_test.py, models-2.13.1/official/projects/pointpillars/modeling/layers.py, models-2.13.1/official/projects/pointpillars/modeling/layers_test.py, models-2.13.1/official/projects/pointpillars/modeling/models.py, models-2.13.1/official/projects/pointpillars/modeling/models_test.py, models-2.13.1/official/projects/pointpillars/tasks/pointpillars.py, 
models-2.13.1/official/projects/pointpillars/tasks/pointpillars_test.py, models-2.13.1/official/projects/pointpillars/tools/export_model.py, models-2.13.1/official/projects/pointpillars/tools/process_wod.py, models-2.13.1/official/projects/pointpillars/utils/model_exporter.py, models-2.13.1/official/projects/pointpillars/utils/utils.py, models-2.13.1/official/projects/pointpillars/utils/utils_test.py, models-2.13.1/official/projects/pointpillars/utils/wod_detection_evaluator.py, models-2.13.1/official/projects/pointpillars/utils/wod_processor.py, models-2.13.1/official/projects/pointpillars/README.md, models-2.13.1/official/projects/pointpillars/registry_imports.py, models-2.13.1/official/projects/pointpillars/train.py, models-2.13.1/official/projects/pruning/configs/experiments/image_classification/imagenet_mobilenetv2_pruning_gpu.yaml, models-2.13.1/official/projects/pruning/configs/experiments/image_classification/imagenet_resnet50_pruning_gpu.yaml, models-2.13.1/official/projects/pruning/configs/__init__.py, models-2.13.1/official/projects/pruning/configs/image_classification.py, models-2.13.1/official/projects/pruning/configs/image_classification_test.py, models-2.13.1/official/projects/pruning/tasks/__init__.py, models-2.13.1/official/projects/pruning/tasks/image_classification.py, models-2.13.1/official/projects/pruning/tasks/image_classification_test.py, models-2.13.1/official/projects/pruning/README.md, models-2.13.1/official/projects/pruning/registry_imports.py, models-2.13.1/official/projects/pruning/train.py, models-2.13.1/official/projects/qat/nlp/configs/experiments/squad_v1_mobilebert_xs_qat_1gpu.yaml, models-2.13.1/official/projects/qat/nlp/configs/experiments/squad_v1_qat_1gpu.yaml, models-2.13.1/official/projects/qat/nlp/configs/__init__.py, models-2.13.1/official/projects/qat/nlp/configs/finetuning_experiments.py, models-2.13.1/official/projects/qat/nlp/docs/MobileBERT_QAT_tutorial.ipynb, models-2.13.1/official/projects/qat/nlp/modeling/layers/__init__.py, models-2.13.1/official/projects/qat/nlp/modeling/layers/mobile_bert_layers.py, models-2.13.1/official/projects/qat/nlp/modeling/layers/multi_head_attention.py, models-2.13.1/official/projects/qat/nlp/modeling/layers/transformer_encoder_block.py, models-2.13.1/official/projects/qat/nlp/modeling/layers/transformer_encoder_block_test.py, models-2.13.1/official/projects/qat/nlp/modeling/models/__init__.py, models-2.13.1/official/projects/qat/nlp/modeling/models/bert_span_labeler.py, models-2.13.1/official/projects/qat/nlp/modeling/networks/__init__.py, models-2.13.1/official/projects/qat/nlp/modeling/networks/span_labeling.py, models-2.13.1/official/projects/qat/nlp/modeling/__init__.py, models-2.13.1/official/projects/qat/nlp/quantization/__init__.py, models-2.13.1/official/projects/qat/nlp/quantization/configs.py, models-2.13.1/official/projects/qat/nlp/quantization/configs_test.py, models-2.13.1/official/projects/qat/nlp/quantization/helper.py, models-2.13.1/official/projects/qat/nlp/quantization/schemes.py, models-2.13.1/official/projects/qat/nlp/quantization/wrappers.py, models-2.13.1/official/projects/qat/nlp/tasks/__init__.py, models-2.13.1/official/projects/qat/nlp/tasks/question_answering.py, models-2.13.1/official/projects/qat/nlp/tasks/question_answering_test.py, models-2.13.1/official/projects/qat/nlp/README.md, models-2.13.1/official/projects/qat/nlp/__init__.py, models-2.13.1/official/projects/qat/nlp/pretrained_checkpoint_converter.py, models-2.13.1/official/projects/qat/nlp/registry_imports.py, 
models-2.13.1/official/projects/qat/nlp/train.py, models-2.13.1/official/projects/qat/vision/configs/experiments/image_classification/imagenet_mobilenetv2_qat_gpu.yaml, models-2.13.1/official/projects/qat/vision/configs/experiments/image_classification/imagenet_mobilenetv2_qat_gpu_batch256.yaml, models-2.13.1/official/projects/qat/vision/configs/experiments/image_classification/imagenet_mobilenetv2_qat_gpu_batch512.yaml, models-2.13.1/official/projects/qat/vision/configs/experiments/image_classification/imagenet_mobilenetv2_qat_tpu.yaml, models-2.13.1/official/projects/qat/vision/configs/experiments/image_classification/imagenet_mobilenetv3.5_qat_gpu.yaml, models-2.13.1/official/projects/qat/vision/configs/experiments/image_classification/imagenet_mobilenetv3.5_qat_tpu.yaml, models-2.13.1/official/projects/qat/vision/configs/experiments/image_classification/imagenet_mobilenetv3large_qat_tpu.yaml, models-2.13.1/official/projects/qat/vision/configs/experiments/image_classification/imagenet_resnet50_qat_gpu.yaml, models-2.13.1/official/projects/qat/vision/configs/experiments/image_classification/imagenet_resnet50_qat_gpu_fast.yaml, models-2.13.1/official/projects/qat/vision/configs/experiments/image_classification/imagenet_resnet50_qat_gpu_fast_4x4.yaml, models-2.13.1/official/projects/qat/vision/configs/experiments/image_classification/imagenet_resnet50_qat_gpu_fast_4x8.yaml, models-2.13.1/official/projects/qat/vision/configs/experiments/image_classification/imagenet_resnet50_qat_gpu_fast_6x6.yaml, models-2.13.1/official/projects/qat/vision/configs/experiments/retinanet/coco_mobilenetv2_qat_tpu_e2e.yaml, models-2.13.1/official/projects/qat/vision/configs/experiments/retinanet/coco_mobilenetv3.5_avg_qat_tpu_e2e.yaml, models-2.13.1/official/projects/qat/vision/configs/experiments/retinanet/coco_spinenet49_mobile_qat_gpu.yaml, models-2.13.1/official/projects/qat/vision/configs/experiments/retinanet/coco_spinenet49_mobile_qat_tpu.yaml, models-2.13.1/official/projects/qat/vision/configs/experiments/retinanet/coco_spinenet49_mobile_qat_tpu_e2e.yaml, models-2.13.1/official/projects/qat/vision/configs/experiments/semantic_segmentation/deeplabv3_mobilenetv2_pascal_qat_gpu.yaml, models-2.13.1/official/projects/qat/vision/configs/experiments/semantic_segmentation/deeplabv3_mobilenetv2_pascal_qat_tpu.yaml, models-2.13.1/official/projects/qat/vision/configs/experiments/semantic_segmentation/deeplabv3plus_mobilenetv2_cityscapes_qat_tpu.yaml, models-2.13.1/official/projects/qat/vision/configs/__init__.py, models-2.13.1/official/projects/qat/vision/configs/common.py, models-2.13.1/official/projects/qat/vision/configs/image_classification.py, models-2.13.1/official/projects/qat/vision/configs/image_classification_test.py, models-2.13.1/official/projects/qat/vision/configs/retinanet.py, models-2.13.1/official/projects/qat/vision/configs/retinanet_test.py, models-2.13.1/official/projects/qat/vision/configs/semantic_segmentation.py, models-2.13.1/official/projects/qat/vision/configs/semantic_segmentation_test.py, models-2.13.1/official/projects/qat/vision/docs/qat_tutorial.ipynb, models-2.13.1/official/projects/qat/vision/modeling/heads/__init__.py, models-2.13.1/official/projects/qat/vision/modeling/heads/dense_prediction_heads.py, models-2.13.1/official/projects/qat/vision/modeling/heads/dense_prediction_heads_test.py, models-2.13.1/official/projects/qat/vision/modeling/layers/__init__.py, models-2.13.1/official/projects/qat/vision/modeling/layers/nn_blocks.py, 
models-2.13.1/official/projects/qat/vision/modeling/layers/nn_blocks_test.py, models-2.13.1/official/projects/qat/vision/modeling/layers/nn_layers.py, models-2.13.1/official/projects/qat/vision/modeling/layers/nn_layers_test.py, models-2.13.1/official/projects/qat/vision/modeling/__init__.py, models-2.13.1/official/projects/qat/vision/modeling/factory.py, models-2.13.1/official/projects/qat/vision/modeling/factory_test.py, models-2.13.1/official/projects/qat/vision/modeling/segmentation_model.py, models-2.13.1/official/projects/qat/vision/n_bit/__init__.py, models-2.13.1/official/projects/qat/vision/n_bit/configs.py, models-2.13.1/official/projects/qat/vision/n_bit/configs_test.py, models-2.13.1/official/projects/qat/vision/n_bit/nn_blocks.py, models-2.13.1/official/projects/qat/vision/n_bit/nn_blocks_test.py, models-2.13.1/official/projects/qat/vision/n_bit/nn_layers.py, models-2.13.1/official/projects/qat/vision/n_bit/schemes.py, models-2.13.1/official/projects/qat/vision/quantization/__init__.py, models-2.13.1/official/projects/qat/vision/quantization/configs.py, models-2.13.1/official/projects/qat/vision/quantization/configs_test.py, models-2.13.1/official/projects/qat/vision/quantization/helper.py, models-2.13.1/official/projects/qat/vision/quantization/helper_test.py, models-2.13.1/official/projects/qat/vision/quantization/layer_transforms.py, models-2.13.1/official/projects/qat/vision/quantization/schemes.py, models-2.13.1/official/projects/qat/vision/serving/__init__.py, models-2.13.1/official/projects/qat/vision/serving/export_module.py, models-2.13.1/official/projects/qat/vision/serving/export_saved_model.py, models-2.13.1/official/projects/qat/vision/serving/export_tflite.py, models-2.13.1/official/projects/qat/vision/tasks/__init__.py, models-2.13.1/official/projects/qat/vision/tasks/image_classification.py, models-2.13.1/official/projects/qat/vision/tasks/image_classification_test.py, models-2.13.1/official/projects/qat/vision/tasks/retinanet.py, models-2.13.1/official/projects/qat/vision/tasks/retinanet_test.py, models-2.13.1/official/projects/qat/vision/tasks/semantic_segmentation.py, models-2.13.1/official/projects/qat/vision/README.md, models-2.13.1/official/projects/qat/vision/__init__.py, models-2.13.1/official/projects/qat/vision/registry_imports.py, models-2.13.1/official/projects/qat/vision/train.py, models-2.13.1/official/projects/qat/__init__.py, models-2.13.1/official/projects/roformer/experiments/roformer_base.yaml, models-2.13.1/official/projects/roformer/README.md, models-2.13.1/official/projects/roformer/__init__.py, models-2.13.1/official/projects/roformer/roformer.py, models-2.13.1/official/projects/roformer/roformer_attention.py, models-2.13.1/official/projects/roformer/roformer_attention_test.py, models-2.13.1/official/projects/roformer/roformer_encoder.py, models-2.13.1/official/projects/roformer/roformer_encoder_block.py, models-2.13.1/official/projects/roformer/roformer_encoder_block_test.py, models-2.13.1/official/projects/roformer/roformer_encoder_test.py, models-2.13.1/official/projects/roformer/roformer_experiments.py, models-2.13.1/official/projects/roformer/train.py, models-2.13.1/official/projects/s3d/configs/s3d.py, models-2.13.1/official/projects/s3d/modeling/inception_utils.py, models-2.13.1/official/projects/s3d/modeling/inception_utils_test.py, models-2.13.1/official/projects/s3d/modeling/net_utils.py, models-2.13.1/official/projects/s3d/modeling/net_utils_test.py, models-2.13.1/official/projects/s3d/modeling/s3d.py, 
models-2.13.1/official/projects/s3d/modeling/s3d_test.py, models-2.13.1/official/projects/s3d/train.py, models-2.13.1/official/projects/simclr/common/registry_imports.py, models-2.13.1/official/projects/simclr/configs/experiments/cifar_simclr_pretrain.yaml, models-2.13.1/official/projects/simclr/configs/experiments/imagenet_simclr_finetune_gpu.yaml, models-2.13.1/official/projects/simclr/configs/experiments/imagenet_simclr_finetune_tpu.yaml, models-2.13.1/official/projects/simclr/configs/experiments/imagenet_simclr_multitask_tpu.yaml, models-2.13.1/official/projects/simclr/configs/experiments/imagenet_simclr_pretrain_gpu.yaml, models-2.13.1/official/projects/simclr/configs/experiments/imagenet_simclr_pretrain_tpu.yaml, models-2.13.1/official/projects/simclr/configs/multitask_config.py, models-2.13.1/official/projects/simclr/configs/multitask_config_test.py, models-2.13.1/official/projects/simclr/configs/simclr.py, models-2.13.1/official/projects/simclr/configs/simclr_test.py, models-2.13.1/official/projects/simclr/dataloaders/preprocess_ops.py, models-2.13.1/official/projects/simclr/dataloaders/simclr_input.py, models-2.13.1/official/projects/simclr/heads/simclr_head.py, models-2.13.1/official/projects/simclr/heads/simclr_head_test.py, models-2.13.1/official/projects/simclr/losses/contrastive_losses.py, models-2.13.1/official/projects/simclr/losses/contrastive_losses_test.py, models-2.13.1/official/projects/simclr/modeling/layers/nn_blocks.py, models-2.13.1/official/projects/simclr/modeling/layers/nn_blocks_test.py, models-2.13.1/official/projects/simclr/modeling/multitask_model.py, models-2.13.1/official/projects/simclr/modeling/multitask_model_test.py, models-2.13.1/official/projects/simclr/modeling/simclr_model.py, models-2.13.1/official/projects/simclr/modeling/simclr_model_test.py, models-2.13.1/official/projects/simclr/tasks/simclr.py, models-2.13.1/official/projects/simclr/README.md, models-2.13.1/official/projects/simclr/multitask_train.py, models-2.13.1/official/projects/simclr/train.py, models-2.13.1/official/projects/teams/experiments/base/glue_mnli.yaml, models-2.13.1/official/projects/teams/experiments/base/squad_v1.yaml, models-2.13.1/official/projects/teams/experiments/base/squad_v2.yaml, models-2.13.1/official/projects/teams/experiments/base/wiki_books_pretrain.yaml, models-2.13.1/official/projects/teams/experiments/small/glue_mnli.yaml, models-2.13.1/official/projects/teams/experiments/small/squad_v1.yaml, models-2.13.1/official/projects/teams/experiments/small/squad_v2.yaml, models-2.13.1/official/projects/teams/experiments/small/wiki_books_pretrain.yaml, models-2.13.1/official/projects/teams/experiments/teams_en_uncased_base.yaml, models-2.13.1/official/projects/teams/experiments/teams_en_uncased_small.yaml, models-2.13.1/official/projects/teams/README.md, models-2.13.1/official/projects/teams/__init__.py, models-2.13.1/official/projects/teams/teams.py, models-2.13.1/official/projects/teams/teams_experiments.py, models-2.13.1/official/projects/teams/teams_pretrainer.py, models-2.13.1/official/projects/teams/teams_pretrainer_test.py, models-2.13.1/official/projects/teams/teams_task.py, models-2.13.1/official/projects/teams/teams_task_test.py, models-2.13.1/official/projects/teams/train.py, models-2.13.1/official/projects/text_classification_example/experiments/classification_ft_cola.yaml, models-2.13.1/official/projects/text_classification_example/experiments/local_example.yaml, models-2.13.1/official/projects/text_classification_example/README.md, 
models-2.13.1/official/projects/text_classification_example/classification_data_loader.py, models-2.13.1/official/projects/text_classification_example/classification_example.py, models-2.13.1/official/projects/text_classification_example/classification_example_test.py, models-2.13.1/official/projects/text_classification_example/train.py, models-2.13.1/official/projects/tn_bert/README.md, models-2.13.1/official/projects/token_dropping/README.md, models-2.13.1/official/projects/token_dropping/bert_en_uncased_base_token_drop.yaml, models-2.13.1/official/projects/token_dropping/encoder.py, models-2.13.1/official/projects/token_dropping/encoder_config.py, models-2.13.1/official/projects/token_dropping/encoder_test.py, models-2.13.1/official/projects/token_dropping/experiment_configs.py, models-2.13.1/official/projects/token_dropping/masked_lm.py, models-2.13.1/official/projects/token_dropping/masked_lm_test.py, models-2.13.1/official/projects/token_dropping/train.py, models-2.13.1/official/projects/token_dropping/wiki_books_pretrain.yaml, models-2.13.1/official/projects/token_dropping/wiki_books_pretrain_sequence_pack.yaml, models-2.13.1/official/projects/triviaqa/__init__.py, models-2.13.1/official/projects/triviaqa/dataset.py, models-2.13.1/official/projects/triviaqa/download_and_prepare.py, models-2.13.1/official/projects/triviaqa/evaluate.py, models-2.13.1/official/projects/triviaqa/evaluation.py, models-2.13.1/official/projects/triviaqa/inputs.py, models-2.13.1/official/projects/triviaqa/modeling.py, models-2.13.1/official/projects/triviaqa/predict.py, models-2.13.1/official/projects/triviaqa/prediction.py, models-2.13.1/official/projects/triviaqa/preprocess.py, models-2.13.1/official/projects/triviaqa/sentencepiece_pb2.py, models-2.13.1/official/projects/triviaqa/train.py, models-2.13.1/official/projects/unified_detector/configs/gin_files/unified_detector_model.gin, models-2.13.1/official/projects/unified_detector/configs/gin_files/unified_detector_train.gin, models-2.13.1/official/projects/unified_detector/configs/ocr_config.py, models-2.13.1/official/projects/unified_detector/data_conversion/convert.py, models-2.13.1/official/projects/unified_detector/data_conversion/utils.py, models-2.13.1/official/projects/unified_detector/data_loaders/autoaugment.py, models-2.13.1/official/projects/unified_detector/data_loaders/input_reader.py, models-2.13.1/official/projects/unified_detector/data_loaders/tf_example_decoder.py, models-2.13.1/official/projects/unified_detector/data_loaders/universal_detection_parser.py, models-2.13.1/official/projects/unified_detector/docs/images/task.png, models-2.13.1/official/projects/unified_detector/modeling/universal_detector.py, models-2.13.1/official/projects/unified_detector/tasks/all_models.py, models-2.13.1/official/projects/unified_detector/tasks/ocr_task.py, models-2.13.1/official/projects/unified_detector/utils/typing.py, models-2.13.1/official/projects/unified_detector/utils/utilities.py, models-2.13.1/official/projects/unified_detector/README.md, models-2.13.1/official/projects/unified_detector/external_configurables.py, models-2.13.1/official/projects/unified_detector/registry_imports.py, models-2.13.1/official/projects/unified_detector/requirements.txt, models-2.13.1/official/projects/unified_detector/run_inference.py, models-2.13.1/official/projects/unified_detector/train.py, models-2.13.1/official/projects/video_ssl/configs/experiments/cvrl_linear_eval_k600.yaml, 
models-2.13.1/official/projects/video_ssl/configs/experiments/cvrl_pretrain_k600_200ep.yaml, models-2.13.1/official/projects/video_ssl/configs/__init__.py, models-2.13.1/official/projects/video_ssl/configs/video_ssl.py, models-2.13.1/official/projects/video_ssl/configs/video_ssl_test.py, models-2.13.1/official/projects/video_ssl/dataloaders/__init__.py, models-2.13.1/official/projects/video_ssl/dataloaders/video_ssl_input.py, models-2.13.1/official/projects/video_ssl/dataloaders/video_ssl_input_test.py, models-2.13.1/official/projects/video_ssl/losses/__init__.py, models-2.13.1/official/projects/video_ssl/losses/losses.py, models-2.13.1/official/projects/video_ssl/modeling/__init__.py, models-2.13.1/official/projects/video_ssl/modeling/video_ssl_model.py, models-2.13.1/official/projects/video_ssl/ops/__init__.py, models-2.13.1/official/projects/video_ssl/ops/video_ssl_preprocess_ops.py, models-2.13.1/official/projects/video_ssl/ops/video_ssl_preprocess_ops_test.py, models-2.13.1/official/projects/video_ssl/tasks/__init__.py, models-2.13.1/official/projects/video_ssl/tasks/linear_eval.py, models-2.13.1/official/projects/video_ssl/tasks/pretrain.py, models-2.13.1/official/projects/video_ssl/tasks/pretrain_test.py, models-2.13.1/official/projects/video_ssl/README.md, models-2.13.1/official/projects/video_ssl/__init__.py, models-2.13.1/official/projects/video_ssl/train.py, models-2.13.1/official/projects/video_ssl/video_ssl.ipynb, models-2.13.1/official/projects/volumetric_models/configs/backbones.py, models-2.13.1/official/projects/volumetric_models/configs/decoders.py, models-2.13.1/official/projects/volumetric_models/configs/semantic_segmentation_3d.py, models-2.13.1/official/projects/volumetric_models/configs/semantic_segmentation_3d_test.py, models-2.13.1/official/projects/volumetric_models/dataloaders/segmentation_input_3d.py, models-2.13.1/official/projects/volumetric_models/dataloaders/segmentation_input_3d_test.py, models-2.13.1/official/projects/volumetric_models/evaluation/segmentation_metrics.py, models-2.13.1/official/projects/volumetric_models/evaluation/segmentation_metrics_test.py, models-2.13.1/official/projects/volumetric_models/losses/segmentation_losses.py, models-2.13.1/official/projects/volumetric_models/losses/segmentation_losses_test.py, models-2.13.1/official/projects/volumetric_models/modeling/backbones/__init__.py, models-2.13.1/official/projects/volumetric_models/modeling/backbones/unet_3d.py, models-2.13.1/official/projects/volumetric_models/modeling/backbones/unet_3d_test.py, models-2.13.1/official/projects/volumetric_models/modeling/decoders/__init__.py, models-2.13.1/official/projects/volumetric_models/modeling/decoders/factory.py, models-2.13.1/official/projects/volumetric_models/modeling/decoders/factory_test.py, models-2.13.1/official/projects/volumetric_models/modeling/decoders/unet_3d_decoder.py, models-2.13.1/official/projects/volumetric_models/modeling/decoders/unet_3d_decoder_test.py, models-2.13.1/official/projects/volumetric_models/modeling/heads/segmentation_heads_3d.py, models-2.13.1/official/projects/volumetric_models/modeling/heads/segmentation_heads_3d_test.py, models-2.13.1/official/projects/volumetric_models/modeling/factory.py, models-2.13.1/official/projects/volumetric_models/modeling/factory_test.py, models-2.13.1/official/projects/volumetric_models/modeling/nn_blocks_3d.py, models-2.13.1/official/projects/volumetric_models/modeling/nn_blocks_3d_test.py, models-2.13.1/official/projects/volumetric_models/modeling/segmentation_model_test.py, 
models-2.13.1/official/projects/volumetric_models/serving/export_saved_model.py, models-2.13.1/official/projects/volumetric_models/serving/semantic_segmentation_3d.py, models-2.13.1/official/projects/volumetric_models/serving/semantic_segmentation_3d_test.py, models-2.13.1/official/projects/volumetric_models/tasks/semantic_segmentation_3d.py, models-2.13.1/official/projects/volumetric_models/tasks/semantic_segmentation_3d_test.py, models-2.13.1/official/projects/volumetric_models/README.md, models-2.13.1/official/projects/volumetric_models/registry_imports.py, models-2.13.1/official/projects/volumetric_models/train.py, models-2.13.1/official/projects/volumetric_models/train_test.py, models-2.13.1/official/projects/waste_identification_ml/model_conversion/checkpoints_to_savedModel_to_tflite.ipynb, models-2.13.1/official/projects/waste_identification_ml/model_inference/TFHub_saved_model_inference.ipynb, models-2.13.1/official/projects/waste_identification_ml/model_inference/saved_model_inference.ipynb, models-2.13.1/official/projects/waste_identification_ml/model_inference/tflite_model_inference.ipynb, models-2.13.1/official/projects/waste_identification_ml/pre_processing/config/data/material_form_labels.pbtxt, models-2.13.1/official/projects/waste_identification_ml/pre_processing/config/data/material_labels.pbtxt, models-2.13.1/official/projects/waste_identification_ml/pre_processing/config/data/plastic_type_labels.pbtxt, models-2.13.1/official/projects/waste_identification_ml/pre_processing/config/sample_images/ffdeb4cd-43ba-4ca0-a1e6-aa5824005f44.jpg, models-2.13.1/official/projects/waste_identification_ml/pre_processing/config/sample_images/image_2.png, models-2.13.1/official/projects/waste_identification_ml/pre_processing/config/sample_images/image_3.jpg, models-2.13.1/official/projects/waste_identification_ml/pre_processing/config/sample_json/dataset.json, models-2.13.1/official/projects/waste_identification_ml/pre_processing/config/sample_json/ffdeb4cd-43ba-4ca0-a1e6-aa5824005f44.json, models-2.13.1/official/projects/waste_identification_ml/pre_processing/config/categories_list_of_dictionaries.py, models-2.13.1/official/projects/waste_identification_ml/pre_processing/config/config.ini, models-2.13.1/official/projects/waste_identification_ml/pre_processing/config/visualization.py, models-2.13.1/official/projects/waste_identification_ml/pre_processing/bb_to_mask_to_coco.ipynb, models-2.13.1/official/projects/waste_identification_ml/pre_processing/coco_to_tfrecord.ipynb, models-2.13.1/official/projects/waste_identification_ml/pre_processing/json_preparation.ipynb, models-2.13.1/official/projects/waste_identification_ml/pre_processing/labelme_to_coco.ipynb, models-2.13.1/official/projects/waste_identification_ml/pre_processing/merge_coco_files.ipynb, models-2.13.1/official/projects/waste_identification_ml/pre_processing/split_coco_files.ipynb, models-2.13.1/official/projects/waste_identification_ml/README.md, models-2.13.1/official/projects/yolo/common/__init__.py, models-2.13.1/official/projects/yolo/common/registry_imports.py, models-2.13.1/official/projects/yolo/configs/experiments/darknet/csp_darknet53.yaml, models-2.13.1/official/projects/yolo/configs/experiments/darknet/csp_darknet53_tfds.yaml, models-2.13.1/official/projects/yolo/configs/experiments/darknet/darknet53.yaml, models-2.13.1/official/projects/yolo/configs/experiments/darknet/darknet53_tfds.yaml, models-2.13.1/official/projects/yolo/configs/experiments/scaled-yolo/detection/yolo_csp_640_gpu.yaml, 
models-2.13.1/official/projects/yolo/configs/experiments/scaled-yolo/detection/yolo_csp_640_tpu.yaml, models-2.13.1/official/projects/yolo/configs/experiments/scaled-yolo/detection/yolo_l_p5_896_gpu.yaml, models-2.13.1/official/projects/yolo/configs/experiments/scaled-yolo/detection/yolo_l_p5_896_tpu.yaml, models-2.13.1/official/projects/yolo/configs/experiments/scaled-yolo/detection/yolo_l_p6_1280_gpu.yaml, models-2.13.1/official/projects/yolo/configs/experiments/scaled-yolo/detection/yolo_l_p6_1280_tpu.yaml, models-2.13.1/official/projects/yolo/configs/experiments/scaled-yolo/detection/yolo_l_p7_1536_tpu.yaml, models-2.13.1/official/projects/yolo/configs/experiments/scaled-yolo/tpu/640.yaml, models-2.13.1/official/projects/yolo/configs/experiments/yolov4/detection/scaled_yolov4_1280_gpu.yaml, models-2.13.1/official/projects/yolo/configs/experiments/yolov4/detection/yolov4_512_tpu.yaml, models-2.13.1/official/projects/yolo/configs/experiments/yolov4/imagenet_pretraining/cspdarknet53_256_tpu.yaml, models-2.13.1/official/projects/yolo/configs/experiments/yolov7/detection/yolov7.yaml, models-2.13.1/official/projects/yolo/configs/experiments/yolov7/detection/yolov7_gpu.yaml, models-2.13.1/official/projects/yolo/configs/__init__.py, models-2.13.1/official/projects/yolo/configs/backbones.py, models-2.13.1/official/projects/yolo/configs/darknet_classification.py, models-2.13.1/official/projects/yolo/configs/decoders.py, models-2.13.1/official/projects/yolo/configs/yolo.py, models-2.13.1/official/projects/yolo/configs/yolov7.py, models-2.13.1/official/projects/yolo/dataloaders/__init__.py, models-2.13.1/official/projects/yolo/dataloaders/classification_input.py, models-2.13.1/official/projects/yolo/dataloaders/tf_example_decoder.py, models-2.13.1/official/projects/yolo/dataloaders/yolo_input.py, models-2.13.1/official/projects/yolo/losses/__init__.py, models-2.13.1/official/projects/yolo/losses/yolo_loss.py, models-2.13.1/official/projects/yolo/losses/yolo_loss_test.py, models-2.13.1/official/projects/yolo/losses/yolov7_loss.py, models-2.13.1/official/projects/yolo/losses/yolov7_loss_test.py, models-2.13.1/official/projects/yolo/modeling/backbones/__init__.py, models-2.13.1/official/projects/yolo/modeling/backbones/darknet.py, models-2.13.1/official/projects/yolo/modeling/backbones/darknet_test.py, models-2.13.1/official/projects/yolo/modeling/backbones/yolov7.py, models-2.13.1/official/projects/yolo/modeling/backbones/yolov7_test.py, models-2.13.1/official/projects/yolo/modeling/decoders/__init__.py, models-2.13.1/official/projects/yolo/modeling/decoders/yolo_decoder.py, models-2.13.1/official/projects/yolo/modeling/decoders/yolo_decoder_test.py, models-2.13.1/official/projects/yolo/modeling/decoders/yolov7.py, models-2.13.1/official/projects/yolo/modeling/decoders/yolov7_test.py, models-2.13.1/official/projects/yolo/modeling/heads/__init__.py, models-2.13.1/official/projects/yolo/modeling/heads/yolo_head.py, models-2.13.1/official/projects/yolo/modeling/heads/yolo_head_test.py, models-2.13.1/official/projects/yolo/modeling/heads/yolov7_head.py, models-2.13.1/official/projects/yolo/modeling/heads/yolov7_head_test.py, models-2.13.1/official/projects/yolo/modeling/layers/__init__.py, models-2.13.1/official/projects/yolo/modeling/layers/detection_generator.py, models-2.13.1/official/projects/yolo/modeling/layers/detection_generator_test.py, models-2.13.1/official/projects/yolo/modeling/layers/nn_blocks.py, models-2.13.1/official/projects/yolo/modeling/layers/nn_blocks_test.py, 
models-2.13.1/official/projects/yolo/modeling/__init__.py, models-2.13.1/official/projects/yolo/modeling/factory.py, models-2.13.1/official/projects/yolo/modeling/factory_test.py, models-2.13.1/official/projects/yolo/modeling/yolo_model.py, models-2.13.1/official/projects/yolo/modeling/yolov7_model.py, models-2.13.1/official/projects/yolo/ops/__init__.py, models-2.13.1/official/projects/yolo/ops/anchor.py, models-2.13.1/official/projects/yolo/ops/box_ops.py, models-2.13.1/official/projects/yolo/ops/box_ops_test.py, models-2.13.1/official/projects/yolo/ops/initializer_ops.py, models-2.13.1/official/projects/yolo/ops/kmeans_anchors.py, models-2.13.1/official/projects/yolo/ops/kmeans_anchors_test.py, models-2.13.1/official/projects/yolo/ops/loss_utils.py, models-2.13.1/official/projects/yolo/ops/math_ops.py, models-2.13.1/official/projects/yolo/ops/mosaic.py, models-2.13.1/official/projects/yolo/ops/preprocessing_ops.py, models-2.13.1/official/projects/yolo/ops/preprocessing_ops_test.py, models-2.13.1/official/projects/yolo/optimization/configs/__init__.py, models-2.13.1/official/projects/yolo/optimization/configs/optimization_config.py, models-2.13.1/official/projects/yolo/optimization/configs/optimizer_config.py, models-2.13.1/official/projects/yolo/optimization/__init__.py, models-2.13.1/official/projects/yolo/optimization/optimizer_factory.py, models-2.13.1/official/projects/yolo/optimization/sgd_torch.py, models-2.13.1/official/projects/yolo/serving/__init__.py, models-2.13.1/official/projects/yolo/serving/export_module_factory.py, models-2.13.1/official/projects/yolo/serving/export_saved_model.py, models-2.13.1/official/projects/yolo/serving/export_tflite.py, models-2.13.1/official/projects/yolo/serving/model_fn.py, models-2.13.1/official/projects/yolo/tasks/__init__.py, models-2.13.1/official/projects/yolo/tasks/image_classification.py, models-2.13.1/official/projects/yolo/tasks/task_utils.py, models-2.13.1/official/projects/yolo/tasks/yolo.py, models-2.13.1/official/projects/yolo/tasks/yolov7.py, models-2.13.1/official/projects/yolo/README.md, models-2.13.1/official/projects/yolo/__init__.py, models-2.13.1/official/projects/yolo/train.py, models-2.13.1/official/projects/yt8m/configs/__init__.py, models-2.13.1/official/projects/yt8m/configs/yt8m.py, models-2.13.1/official/projects/yt8m/configs/yt8m_test.py, models-2.13.1/official/projects/yt8m/dataloaders/utils.py, models-2.13.1/official/projects/yt8m/dataloaders/yt8m_input.py, models-2.13.1/official/projects/yt8m/dataloaders/yt8m_input_test.py, models-2.13.1/official/projects/yt8m/eval_utils/average_precision_calculator.py, models-2.13.1/official/projects/yt8m/eval_utils/eval_util.py, models-2.13.1/official/projects/yt8m/eval_utils/eval_util_test.py, models-2.13.1/official/projects/yt8m/eval_utils/mean_average_precision_calculator.py, models-2.13.1/official/projects/yt8m/experiments/yt8m.yaml, models-2.13.1/official/projects/yt8m/experiments/yt8m_test.yaml, models-2.13.1/official/projects/yt8m/modeling/__init__.py, models-2.13.1/official/projects/yt8m/modeling/nn_layers.py, models-2.13.1/official/projects/yt8m/modeling/yt8m_model.py, models-2.13.1/official/projects/yt8m/modeling/yt8m_model_test.py, models-2.13.1/official/projects/yt8m/modeling/yt8m_model_utils.py, models-2.13.1/official/projects/yt8m/tasks/__init__.py, models-2.13.1/official/projects/yt8m/tasks/yt8m_task.py, models-2.13.1/official/projects/yt8m/README.md, models-2.13.1/official/projects/yt8m/__init__.py, models-2.13.1/official/projects/yt8m/train.py, 
models-2.13.1/official/projects/yt8m/train_test.py, models-2.13.1/official/projects/README.md, models-2.13.1/official/projects/__init__.py, models-2.13.1/official/recommendation/ranking/configs/yaml/dcn_v2_criteo_tpu.yaml, models-2.13.1/official/recommendation/ranking/configs/yaml/dlrm_criteo_tpu.yaml, models-2.13.1/official/recommendation/ranking/configs/__init__.py, models-2.13.1/official/recommendation/ranking/configs/config.py, models-2.13.1/official/recommendation/ranking/configs/config_test.py, models-2.13.1/official/recommendation/ranking/data/__init__.py, models-2.13.1/official/recommendation/ranking/data/data_pipeline.py, models-2.13.1/official/recommendation/ranking/data/data_pipeline_test.py, models-2.13.1/official/recommendation/ranking/preprocessing/README.md, models-2.13.1/official/recommendation/ranking/preprocessing/criteo_preprocess.py, models-2.13.1/official/recommendation/ranking/preprocessing/setup.py, models-2.13.1/official/recommendation/ranking/preprocessing/shard_rebalancer.py, models-2.13.1/official/recommendation/ranking/README.md, models-2.13.1/official/recommendation/ranking/__init__.py, models-2.13.1/official/recommendation/ranking/common.py, models-2.13.1/official/recommendation/ranking/task.py, models-2.13.1/official/recommendation/ranking/task_test.py, models-2.13.1/official/recommendation/ranking/train.py, models-2.13.1/official/recommendation/ranking/train_test.py, models-2.13.1/official/recommendation/README.md, models-2.13.1/official/recommendation/__init__.py, models-2.13.1/official/recommendation/constants.py, models-2.13.1/official/recommendation/create_ncf_data.py, models-2.13.1/official/recommendation/data_pipeline.py, models-2.13.1/official/recommendation/data_preprocessing.py, models-2.13.1/official/recommendation/data_test.py, models-2.13.1/official/recommendation/movielens.py, models-2.13.1/official/recommendation/ncf_common.py, models-2.13.1/official/recommendation/ncf_input_pipeline.py, models-2.13.1/official/recommendation/ncf_keras_main.py, models-2.13.1/official/recommendation/ncf_test.py, models-2.13.1/official/recommendation/neumf_model.py, models-2.13.1/official/recommendation/popen_helper.py, models-2.13.1/official/recommendation/run.sh, models-2.13.1/official/recommendation/stat_utils.py, models-2.13.1/official/utils/docs/README.md, models-2.13.1/official/utils/docs/__init__.py, models-2.13.1/official/utils/docs/build_orbit_api_docs.py, models-2.13.1/official/utils/docs/build_tfm_api_docs.py, models-2.13.1/official/utils/flags/README.md, models-2.13.1/official/utils/flags/__init__.py, models-2.13.1/official/utils/flags/_base.py, models-2.13.1/official/utils/flags/_benchmark.py, models-2.13.1/official/utils/flags/_conventions.py, models-2.13.1/official/utils/flags/_device.py, models-2.13.1/official/utils/flags/_distribution.py, models-2.13.1/official/utils/flags/_misc.py, models-2.13.1/official/utils/flags/_performance.py, models-2.13.1/official/utils/flags/core.py, models-2.13.1/official/utils/flags/flags_test.py, models-2.13.1/official/utils/flags/guidelines.md, models-2.13.1/official/utils/misc/__init__.py, models-2.13.1/official/utils/misc/keras_utils.py, models-2.13.1/official/utils/misc/model_helpers.py, models-2.13.1/official/utils/misc/model_helpers_test.py, models-2.13.1/official/utils/testing/scripts/builds_common.sh, models-2.13.1/official/utils/testing/scripts/ci_sanity.sh, models-2.13.1/official/utils/testing/scripts/presubmit.sh, models-2.13.1/official/utils/testing/__init__.py, 
models-2.13.1/official/utils/testing/integration.py, models-2.13.1/official/utils/testing/mock_task.py, models-2.13.1/official/utils/testing/pylint.rcfile, models-2.13.1/official/utils/__init__.py, models-2.13.1/official/utils/hyperparams_flags.py, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_mobilenetv1_tpu.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_mobilenetv2_gpu.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_mobilenetv2_tpu.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_mobilenetv3.5_avg_tpu.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_mobilenetv3.5_avgseg_tpu.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_mobilenetv3large_tpu.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_mobilenetv3small_tpu.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_resnet101_deeplab_tpu.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_resnet101_tpu.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_resnet152_tpu.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_resnet26_tpu.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_resnet50_deeplab_tpu.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_resnet50_gpu.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_resnet50_tfds_tpu.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_resnet50_tpu.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_resnetrs101_i160.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_resnetrs101_i192.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_resnetrs152_i192.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_resnetrs152_i224.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_resnetrs152_i256.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_resnetrs200_i256.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_resnetrs270_i256.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_resnetrs350_i256.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_resnetrs350_i320.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_resnetrs420_i320.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_resnetrs50_i160.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_resnetrs50_i160_gpu.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_vitb16_i224_gpu.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_vitb16_i224_tpu.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_vitb16_i384_gpu.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_vitb16_i384_tpu.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_vith14_i384_gpu.yaml, 
models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_vith14_i384_tpu.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_vitl16_i224_gpu.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_vitl16_i224_tpu.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_vitl16_i384_gpu.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_vitl16_i384_tpu.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_vits16_i224_gpu.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_vits16_i224_tpu.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_vitti16_i224_gpu.yaml, models-2.13.1/official/vision/configs/experiments/image_classification/imagenet_vitti16_i224_tpu.yaml, models-2.13.1/official/vision/configs/experiments/maskrcnn/coco_mobilenetv2_mrcnn_tpu.yaml, models-2.13.1/official/vision/configs/experiments/maskrcnn/coco_spinenet143_cascadercnn_tpu.yaml, models-2.13.1/official/vision/configs/experiments/maskrcnn/coco_spinenet143_mrcnn_tpu.yaml, models-2.13.1/official/vision/configs/experiments/maskrcnn/coco_spinenet49_cascadercnn_tpu.yaml, models-2.13.1/official/vision/configs/experiments/maskrcnn/coco_spinenet49_mrcnn_tpu.yaml, models-2.13.1/official/vision/configs/experiments/maskrcnn/coco_spinenet96_cascadercnn_tpu.yaml, models-2.13.1/official/vision/configs/experiments/maskrcnn/coco_spinenet96_mrcnn_tpu.yaml, models-2.13.1/official/vision/configs/experiments/maskrcnn/r50fpn_640_coco_scratch_tpu4x4.yaml, models-2.13.1/official/vision/configs/experiments/retinanet/coco_mobiledetcpu_tpu.yaml, models-2.13.1/official/vision/configs/experiments/retinanet/coco_mobilenetv2_tpu.yaml, models-2.13.1/official/vision/configs/experiments/retinanet/coco_mobilenetv3.5_avg_tpu.yaml, models-2.13.1/official/vision/configs/experiments/retinanet/coco_spinenet143_gpu_multiworker_mirrored.yaml, models-2.13.1/official/vision/configs/experiments/retinanet/coco_spinenet143_tpu.yaml, models-2.13.1/official/vision/configs/experiments/retinanet/coco_spinenet190_tpu.yaml, models-2.13.1/official/vision/configs/experiments/retinanet/coco_spinenet49_gpu_multiworker_mirrored.yaml, models-2.13.1/official/vision/configs/experiments/retinanet/coco_spinenet49_mobile_tpu.yaml, models-2.13.1/official/vision/configs/experiments/retinanet/coco_spinenet49_tpu.yaml, models-2.13.1/official/vision/configs/experiments/retinanet/coco_spinenet49s_mobile_tpu.yaml, models-2.13.1/official/vision/configs/experiments/retinanet/coco_spinenet49xs_mobile_tpu.yaml, models-2.13.1/official/vision/configs/experiments/retinanet/coco_spinenet96_gpu_multiworker_mirrored.yaml, models-2.13.1/official/vision/configs/experiments/retinanet/coco_spinenet96_tpu.yaml, models-2.13.1/official/vision/configs/experiments/retinanet/resnet50fpn_coco_tfds_tpu.yaml, models-2.13.1/official/vision/configs/experiments/retinanet/resnet50fpn_coco_tpu4x4_benchmark.yaml, models-2.13.1/official/vision/configs/experiments/semantic_segmentation/deeplabv3plus_resnet101_cityscapes_gpu.yaml, models-2.13.1/official/vision/configs/experiments/semantic_segmentation/deeplabv3plus_resnet101_cityscapes_gpu_multiworker_mirrored.yaml, models-2.13.1/official/vision/configs/experiments/semantic_segmentation/deeplabv3plus_resnet101_cityscapes_tfds_tpu.yaml, 
models-2.13.1/official/vision/configs/experiments/video_classification/k400_3d-resnet50_tpu.yaml, models-2.13.1/official/vision/configs/experiments/video_classification/k400_resnet3drs_50_tpu.yaml, models-2.13.1/official/vision/configs/experiments/video_classification/k400_slowonly16x4_tpu.yaml, models-2.13.1/official/vision/configs/experiments/video_classification/k400_slowonly8x8_tpu.yaml, models-2.13.1/official/vision/configs/experiments/video_classification/k600_3d-resnet50_tpu.yaml, models-2.13.1/official/vision/configs/experiments/video_classification/k600_3d-resnet50g_tpu.yaml, models-2.13.1/official/vision/configs/experiments/video_classification/k600_slowonly8x8_tpu.yaml, models-2.13.1/official/vision/configs/__init__.py, models-2.13.1/official/vision/configs/backbones.py, models-2.13.1/official/vision/configs/backbones_3d.py, models-2.13.1/official/vision/configs/common.py, models-2.13.1/official/vision/configs/decoders.py, models-2.13.1/official/vision/configs/image_classification.py, models-2.13.1/official/vision/configs/image_classification_test.py, models-2.13.1/official/vision/configs/maskrcnn.py, models-2.13.1/official/vision/configs/maskrcnn_test.py, models-2.13.1/official/vision/configs/retinanet.py, models-2.13.1/official/vision/configs/retinanet_test.py, models-2.13.1/official/vision/configs/semantic_segmentation.py, models-2.13.1/official/vision/configs/semantic_segmentation_test.py, models-2.13.1/official/vision/configs/video_classification.py, models-2.13.1/official/vision/configs/video_classification_test.py, models-2.13.1/official/vision/data/__init__.py, models-2.13.1/official/vision/data/create_coco_tf_record.py, models-2.13.1/official/vision/data/fake_feature_generator.py, models-2.13.1/official/vision/data/image_utils.py, models-2.13.1/official/vision/data/image_utils_test.py, models-2.13.1/official/vision/data/process_coco_few_shot.sh, models-2.13.1/official/vision/data/process_coco_few_shot_json_files.py, models-2.13.1/official/vision/data/process_coco_panoptic.sh, models-2.13.1/official/vision/data/tf_example_builder.py, models-2.13.1/official/vision/data/tf_example_builder_test.py, models-2.13.1/official/vision/data/tf_example_feature_key.py, models-2.13.1/official/vision/data/tfrecord_lib.py, models-2.13.1/official/vision/data/tfrecord_lib_test.py, models-2.13.1/official/vision/dataloaders/__init__.py, models-2.13.1/official/vision/dataloaders/classification_input.py, models-2.13.1/official/vision/dataloaders/decoder.py, models-2.13.1/official/vision/dataloaders/input_reader.py, models-2.13.1/official/vision/dataloaders/input_reader_factory.py, models-2.13.1/official/vision/dataloaders/maskrcnn_input.py, models-2.13.1/official/vision/dataloaders/parser.py, models-2.13.1/official/vision/dataloaders/retinanet_input.py, models-2.13.1/official/vision/dataloaders/segmentation_input.py, models-2.13.1/official/vision/dataloaders/tf_example_decoder.py, models-2.13.1/official/vision/dataloaders/tf_example_decoder_test.py, models-2.13.1/official/vision/dataloaders/tf_example_label_map_decoder.py, models-2.13.1/official/vision/dataloaders/tf_example_label_map_decoder_test.py, models-2.13.1/official/vision/dataloaders/tfds_classification_decoders.py, models-2.13.1/official/vision/dataloaders/tfds_detection_decoders.py, models-2.13.1/official/vision/dataloaders/tfds_factory.py, models-2.13.1/official/vision/dataloaders/tfds_factory_test.py, models-2.13.1/official/vision/dataloaders/tfds_segmentation_decoders.py, 
models-2.13.1/official/vision/dataloaders/tfexample_utils.py, models-2.13.1/official/vision/dataloaders/utils.py, models-2.13.1/official/vision/dataloaders/utils_test.py, models-2.13.1/official/vision/dataloaders/video_input.py, models-2.13.1/official/vision/dataloaders/video_input_test.py, models-2.13.1/official/vision/evaluation/__init__.py, models-2.13.1/official/vision/evaluation/coco_evaluator.py, models-2.13.1/official/vision/evaluation/coco_utils.py, models-2.13.1/official/vision/evaluation/coco_utils_test.py, models-2.13.1/official/vision/evaluation/instance_metrics.py, models-2.13.1/official/vision/evaluation/instance_metrics_test.py, models-2.13.1/official/vision/evaluation/iou.py, models-2.13.1/official/vision/evaluation/iou_test.py, models-2.13.1/official/vision/evaluation/panoptic_quality.py, models-2.13.1/official/vision/evaluation/panoptic_quality_evaluator.py, models-2.13.1/official/vision/evaluation/panoptic_quality_evaluator_test.py, models-2.13.1/official/vision/evaluation/panoptic_quality_test.py, models-2.13.1/official/vision/evaluation/segmentation_metrics.py, models-2.13.1/official/vision/evaluation/segmentation_metrics_test.py, models-2.13.1/official/vision/evaluation/wod_detection_evaluator.py, models-2.13.1/official/vision/examples/starter/README.md, models-2.13.1/official/vision/examples/starter/example_config.py, models-2.13.1/official/vision/examples/starter/example_config_local.yaml, models-2.13.1/official/vision/examples/starter/example_config_tpu.yaml, models-2.13.1/official/vision/examples/starter/example_input.py, models-2.13.1/official/vision/examples/starter/example_model.py, models-2.13.1/official/vision/examples/starter/example_task.py, models-2.13.1/official/vision/examples/starter/registry_imports.py, models-2.13.1/official/vision/examples/starter/train.py, models-2.13.1/official/vision/losses/__init__.py, models-2.13.1/official/vision/losses/focal_loss.py, models-2.13.1/official/vision/losses/loss_utils.py, models-2.13.1/official/vision/losses/maskrcnn_losses.py, models-2.13.1/official/vision/losses/maskrcnn_losses_test.py, models-2.13.1/official/vision/losses/retinanet_losses.py, models-2.13.1/official/vision/losses/segmentation_losses.py, models-2.13.1/official/vision/losses/segmentation_losses_test.py, models-2.13.1/official/vision/modeling/backbones/__init__.py, models-2.13.1/official/vision/modeling/backbones/efficientnet.py, models-2.13.1/official/vision/modeling/backbones/efficientnet_test.py, models-2.13.1/official/vision/modeling/backbones/factory.py, models-2.13.1/official/vision/modeling/backbones/factory_test.py, models-2.13.1/official/vision/modeling/backbones/mobiledet.py, models-2.13.1/official/vision/modeling/backbones/mobiledet_test.py, models-2.13.1/official/vision/modeling/backbones/mobilenet.py, models-2.13.1/official/vision/modeling/backbones/mobilenet_test.py, models-2.13.1/official/vision/modeling/backbones/resnet.py, models-2.13.1/official/vision/modeling/backbones/resnet_3d.py, models-2.13.1/official/vision/modeling/backbones/resnet_3d_test.py, models-2.13.1/official/vision/modeling/backbones/resnet_deeplab.py, models-2.13.1/official/vision/modeling/backbones/resnet_deeplab_test.py, models-2.13.1/official/vision/modeling/backbones/resnet_test.py, models-2.13.1/official/vision/modeling/backbones/revnet.py, models-2.13.1/official/vision/modeling/backbones/revnet_test.py, models-2.13.1/official/vision/modeling/backbones/spinenet.py, models-2.13.1/official/vision/modeling/backbones/spinenet_mobile.py, 
models-2.13.1/official/vision/modeling/backbones/spinenet_mobile_test.py, models-2.13.1/official/vision/modeling/backbones/spinenet_test.py, models-2.13.1/official/vision/modeling/backbones/vit.py, models-2.13.1/official/vision/modeling/backbones/vit_specs.py, models-2.13.1/official/vision/modeling/backbones/vit_test.py, models-2.13.1/official/vision/modeling/decoders/__init__.py, models-2.13.1/official/vision/modeling/decoders/aspp.py, models-2.13.1/official/vision/modeling/decoders/aspp_test.py, models-2.13.1/official/vision/modeling/decoders/factory.py, models-2.13.1/official/vision/modeling/decoders/factory_test.py, models-2.13.1/official/vision/modeling/decoders/fpn.py, models-2.13.1/official/vision/modeling/decoders/fpn_test.py, models-2.13.1/official/vision/modeling/decoders/nasfpn.py, models-2.13.1/official/vision/modeling/decoders/nasfpn_test.py, models-2.13.1/official/vision/modeling/heads/__init__.py, models-2.13.1/official/vision/modeling/heads/dense_prediction_heads.py, models-2.13.1/official/vision/modeling/heads/dense_prediction_heads_test.py, models-2.13.1/official/vision/modeling/heads/instance_heads.py, models-2.13.1/official/vision/modeling/heads/instance_heads_test.py, models-2.13.1/official/vision/modeling/heads/segmentation_heads.py, models-2.13.1/official/vision/modeling/heads/segmentation_heads_test.py, models-2.13.1/official/vision/modeling/layers/__init__.py, models-2.13.1/official/vision/modeling/layers/box_sampler.py, models-2.13.1/official/vision/modeling/layers/deeplab.py, models-2.13.1/official/vision/modeling/layers/deeplab_test.py, models-2.13.1/official/vision/modeling/layers/detection_generator.py, models-2.13.1/official/vision/modeling/layers/detection_generator_test.py, models-2.13.1/official/vision/modeling/layers/edgetpu.py, models-2.13.1/official/vision/modeling/layers/edgetpu_test.py, models-2.13.1/official/vision/modeling/layers/mask_sampler.py, models-2.13.1/official/vision/modeling/layers/nn_blocks.py, models-2.13.1/official/vision/modeling/layers/nn_blocks_3d.py, models-2.13.1/official/vision/modeling/layers/nn_blocks_3d_test.py, models-2.13.1/official/vision/modeling/layers/nn_blocks_test.py, models-2.13.1/official/vision/modeling/layers/nn_layers.py, models-2.13.1/official/vision/modeling/layers/nn_layers_test.py, models-2.13.1/official/vision/modeling/layers/roi_aligner.py, models-2.13.1/official/vision/modeling/layers/roi_aligner_test.py, models-2.13.1/official/vision/modeling/layers/roi_generator.py, models-2.13.1/official/vision/modeling/layers/roi_sampler.py, models-2.13.1/official/vision/modeling/models/__init__.py, models-2.13.1/official/vision/modeling/__init__.py, models-2.13.1/official/vision/modeling/classification_model.py, models-2.13.1/official/vision/modeling/classification_model_test.py, models-2.13.1/official/vision/modeling/factory.py, models-2.13.1/official/vision/modeling/factory_3d.py, models-2.13.1/official/vision/modeling/factory_test.py, models-2.13.1/official/vision/modeling/maskrcnn_model.py, models-2.13.1/official/vision/modeling/maskrcnn_model_test.py, models-2.13.1/official/vision/modeling/retinanet_model.py, models-2.13.1/official/vision/modeling/retinanet_model_test.py, models-2.13.1/official/vision/modeling/segmentation_model.py, models-2.13.1/official/vision/modeling/segmentation_model_test.py, models-2.13.1/official/vision/modeling/video_classification_model.py, models-2.13.1/official/vision/modeling/video_classification_model_test.py, models-2.13.1/official/vision/ops/__init__.py, 
models-2.13.1/official/vision/ops/anchor.py, models-2.13.1/official/vision/ops/anchor_generator.py, models-2.13.1/official/vision/ops/anchor_generator_test.py, models-2.13.1/official/vision/ops/anchor_test.py, models-2.13.1/official/vision/ops/augment.py, models-2.13.1/official/vision/ops/augment_test.py, models-2.13.1/official/vision/ops/box_matcher.py, models-2.13.1/official/vision/ops/box_matcher_test.py, models-2.13.1/official/vision/ops/box_ops.py, models-2.13.1/official/vision/ops/iou_similarity.py, models-2.13.1/official/vision/ops/iou_similarity_test.py, models-2.13.1/official/vision/ops/mask_ops.py, models-2.13.1/official/vision/ops/mask_ops_test.py, models-2.13.1/official/vision/ops/nms.py, models-2.13.1/official/vision/ops/preprocess_ops.py, models-2.13.1/official/vision/ops/preprocess_ops_3d.py, models-2.13.1/official/vision/ops/preprocess_ops_3d_test.py, models-2.13.1/official/vision/ops/preprocess_ops_test.py, models-2.13.1/official/vision/ops/sampling_ops.py, models-2.13.1/official/vision/ops/spatial_transform_ops.py, models-2.13.1/official/vision/ops/target_gather.py, models-2.13.1/official/vision/ops/target_gather_test.py, models-2.13.1/official/vision/serving/__init__.py, models-2.13.1/official/vision/serving/detection.py, models-2.13.1/official/vision/serving/detection_test.py, models-2.13.1/official/vision/serving/export_base.py, models-2.13.1/official/vision/serving/export_base_v2.py, models-2.13.1/official/vision/serving/export_base_v2_test.py, models-2.13.1/official/vision/serving/export_module_factory.py, models-2.13.1/official/vision/serving/export_module_factory_test.py, models-2.13.1/official/vision/serving/export_saved_model.py, models-2.13.1/official/vision/serving/export_saved_model_lib.py, models-2.13.1/official/vision/serving/export_saved_model_lib_test.py, models-2.13.1/official/vision/serving/export_saved_model_lib_v2.py, models-2.13.1/official/vision/serving/export_tfhub.py, models-2.13.1/official/vision/serving/export_tfhub_lib.py, models-2.13.1/official/vision/serving/export_tflite.py, models-2.13.1/official/vision/serving/export_tflite_lib.py, models-2.13.1/official/vision/serving/export_utils.py, models-2.13.1/official/vision/serving/image_classification.py, models-2.13.1/official/vision/serving/image_classification_test.py, models-2.13.1/official/vision/serving/semantic_segmentation.py, models-2.13.1/official/vision/serving/semantic_segmentation_test.py, models-2.13.1/official/vision/serving/video_classification.py, models-2.13.1/official/vision/serving/video_classification_test.py, models-2.13.1/official/vision/tasks/__init__.py, models-2.13.1/official/vision/tasks/image_classification.py, models-2.13.1/official/vision/tasks/maskrcnn.py, models-2.13.1/official/vision/tasks/retinanet.py, models-2.13.1/official/vision/tasks/semantic_segmentation.py, models-2.13.1/official/vision/tasks/video_classification.py, models-2.13.1/official/vision/utils/object_detection/__init__.py, models-2.13.1/official/vision/utils/object_detection/argmax_matcher.py, models-2.13.1/official/vision/utils/object_detection/balanced_positive_negative_sampler.py, models-2.13.1/official/vision/utils/object_detection/box_coder.py, models-2.13.1/official/vision/utils/object_detection/box_list.py, models-2.13.1/official/vision/utils/object_detection/box_list_ops.py, models-2.13.1/official/vision/utils/object_detection/faster_rcnn_box_coder.py, models-2.13.1/official/vision/utils/object_detection/matcher.py, models-2.13.1/official/vision/utils/object_detection/minibatch_sampler.py, 
models-2.13.1/official/vision/utils/object_detection/ops.py, models-2.13.1/official/vision/utils/object_detection/preprocessor.py, models-2.13.1/official/vision/utils/object_detection/region_similarity_calculator.py, models-2.13.1/official/vision/utils/object_detection/shape_utils.py, models-2.13.1/official/vision/utils/object_detection/target_assigner.py, models-2.13.1/official/vision/utils/object_detection/visualization_utils.py, models-2.13.1/official/vision/utils/__init__.py, models-2.13.1/official/vision/utils/summary_manager.py, models-2.13.1/official/vision/MODEL_GARDEN.md, models-2.13.1/official/vision/README.md, models-2.13.1/official/vision/__init__.py, models-2.13.1/official/vision/registry_imports.py, models-2.13.1/official/vision/train.py, models-2.13.1/official/vision/train_spatial_partitioning.py, models-2.13.1/official/README-TPU.md, models-2.13.1/official/README.md, models-2.13.1/official/__init__.py, models-2.13.1/official/nightly_requirements.txt, models-2.13.1/official/requirements.txt, models-2.13.1/orbit/actions/__init__.py, models-2.13.1/orbit/actions/conditional_action.py, models-2.13.1/orbit/actions/conditional_action_test.py, models-2.13.1/orbit/actions/export_saved_model.py, models-2.13.1/orbit/actions/export_saved_model_test.py, models-2.13.1/orbit/actions/new_best_metric.py, models-2.13.1/orbit/actions/new_best_metric_test.py, models-2.13.1/orbit/actions/save_checkpoint_if_preempted.py, models-2.13.1/orbit/examples/single_task/__init__.py, models-2.13.1/orbit/examples/single_task/single_task_evaluator.py, models-2.13.1/orbit/examples/single_task/single_task_evaluator_test.py, models-2.13.1/orbit/examples/single_task/single_task_trainer.py, models-2.13.1/orbit/examples/single_task/single_task_trainer_test.py, models-2.13.1/orbit/examples/__init__.py, models-2.13.1/orbit/utils/__init__.py, models-2.13.1/orbit/utils/common.py, models-2.13.1/orbit/utils/common_test.py, models-2.13.1/orbit/utils/epoch_helper.py, models-2.13.1/orbit/utils/loop_fns.py, models-2.13.1/orbit/utils/summary_manager.py, models-2.13.1/orbit/utils/summary_manager_interface.py, models-2.13.1/orbit/utils/tpu_summaries.py, models-2.13.1/orbit/utils/tpu_summaries_test.py, models-2.13.1/orbit/LICENSE, models-2.13.1/orbit/README.md, models-2.13.1/orbit/__init__.py, models-2.13.1/orbit/controller.py, models-2.13.1/orbit/controller_test.py, models-2.13.1/orbit/runner.py, models-2.13.1/orbit/standard_runner.py, models-2.13.1/orbit/standard_runner_test.py, models-2.13.1/tensorflow_models/nlp/__init__.py, models-2.13.1/tensorflow_models/vision/__init__.py, models-2.13.1/tensorflow_models/__init__.py, models-2.13.1/tensorflow_models/tensorflow_models_pypi.ipynb, models-2.13.1/tensorflow_models/tensorflow_models_test.py, models-2.13.1/.gitignore, models-2.13.1/AUTHORS, models-2.13.1/CODEOWNERS, models-2.13.1/CONTRIBUTING.md, models-2.13.1/ISSUES.md, models-2.13.1/LICENSE, models-2.13.1/README.md, models-2.13.1/logfile_resnet50_fp32_bs_128_tf_2.13
parent cd3038e4
This diff is collapsed.
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "vXLA5InzXydn"
},
"source": [
"##### Copyright 2021 The TensorFlow Authors."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "form",
"id": "RuRlpLL-X0R_"
},
"outputs": [],
"source": [
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n",
"# you may not use this file except in compliance with the License.\n",
"# You may obtain a copy of the License at\n",
"#\n",
"# https://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"# See the License for the specific language governing permissions and\n",
"# limitations under the License."
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "2X-XaMSVcLua"
},
"source": [
"# Decoding API"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "hYEwGTeCXnnX"
},
"source": [
"\u003ctable class=\"tfo-notebook-buttons\" align=\"left\"\u003e\n",
" \u003ctd\u003e\n",
" \u003ca target=\"_blank\" href=\"https://www.tensorflow.org/tfmodels/nlp/decoding_api\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" /\u003eView on TensorFlow.org\u003c/a\u003e\n",
" \u003c/td\u003e\n",
" \u003ctd\u003e\n",
" \u003ca target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/models/blob/master/docs/nlp/decoding_api.ipynb\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" /\u003eRun in Google Colab\u003c/a\u003e\n",
" \u003c/td\u003e\n",
" \u003ctd\u003e\n",
" \u003ca target=\"_blank\" href=\"https://github.com/tensorflow/models/blob/master/docs/nlp/decoding_api.ipynb\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" /\u003eView source on GitHub\u003c/a\u003e\n",
" \u003c/td\u003e\n",
" \u003ctd\u003e\n",
" \u003ca href=\"https://storage.googleapis.com/tensorflow_docs/models/docs/nlp/decoding_api.ipynb\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/download_logo_32px.png\" /\u003eDownload notebook\u003c/a\u003e\n",
" \u003c/td\u003e\n",
"\u003c/table\u003e"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "fsACVQpVSifi"
},
"source": [
"### Install the TensorFlow Model Garden pip package\n",
"\n",
"* `tf-models-official` is the stable Model Garden package. Note that it may not include the latest changes in the `tensorflow_models` github repo. To include latest changes, you may install `tf-models-nightly`,\n",
"which is the nightly Model Garden package created daily automatically.\n",
"* pip will install all models and dependencies automatically."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "G4BhAu01HZcM"
},
"outputs": [],
"source": [
"!pip uninstall -y opencv-python"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "2j-xhrsVQOQT"
},
"outputs": [],
"source": [
"!pip install tf-models-official"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "BjP7zwxmskpY"
},
"outputs": [],
"source": [
"import os\n",
"\n",
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"\n",
"import tensorflow as tf\n",
"\n",
"from tensorflow_models import nlp"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "T92ccAzlnGqh"
},
"outputs": [],
"source": [
"def length_norm(length, dtype):\n",
" \"\"\"Return length normalization factor.\"\"\"\n",
" return tf.pow(((5. + tf.cast(length, dtype)) / 6.), 0.0)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "0AWgyo-IQ5sP"
},
"source": [
"## Overview\n",
"\n",
"This API provides an interface to experiment with different decoding strategies used for auto-regressive models.\n",
"\n",
"1. The following sampling strategies are provided in sampling_module.py, which inherits from the base Decoding class:\n",
" * [top_p](https://arxiv.org/abs/1904.09751) : [github](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/ops/sampling_module.py#L65) \n",
"\n",
" This implementation chooses the most probable logits with cumulative probabilities up to top_p.\n",
"\n",
" * [top_k](https://arxiv.org/pdf/1805.04833.pdf) : [github](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/ops/sampling_module.py#L48)\n",
"\n",
" At each timestep, this implementation samples from top-k logits based on their probability distribution\n",
"\n",
" * Greedy : [github](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/ops/sampling_module.py#L26)\n",
"\n",
" This implementation returns the top logits based on probabilities.\n",
"\n",
"2. Beam search is provided in beam_search.py. [github](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/ops/beam_search.py)\n",
"\n",
" This implementation reduces the risk of missing hidden high probability logits by keeping the most likely num_beams of logits at each time step and eventually choosing the logits that has the overall highest probability."
]
},
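{
"cell_type": "markdown",
"metadata": {},
"source": [
"The following cell is a small illustrative sketch, not part of the library: on a toy distribution it shows how top-k keeps the k most probable ids and how top-p keeps the smallest set of sorted ids whose cumulative probability reaches p."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustration only: how top-k and top-p shrink the candidate pool.\n",
"probs = tf.constant([0.5, 0.3, 0.1, 0.1])  # toy next-token distribution\n",
"logits = tf.math.log(probs)\n",
"\n",
"# top-k keeps the k ids with the highest probability.\n",
"k = 2\n",
"top_k_values, top_k_ids = tf.math.top_k(logits, k=k)\n",
"print('top-k keeps ids:', top_k_ids.numpy())\n",
"\n",
"# top-p keeps the smallest prefix of the sorted ids whose cumulative\n",
"# probability reaches p.\n",
"p = 0.85\n",
"sorted_probs = tf.sort(probs, direction='DESCENDING')\n",
"cumulative = tf.cumsum(sorted_probs)\n",
"num_kept = tf.reduce_sum(tf.cast(cumulative \u003c p, tf.int32)) + 1\n",
"print('top-p keeps the', int(num_kept), 'most probable ids')"
]
},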
{
"cell_type": "markdown",
"metadata": {
"id": "MfOj7oaBRQnS"
},
"source": [
"## Initialize Sampling Module in TF-NLP.\n",
"\n",
"\n",
"\u003e **symbols_to_logits_fn** : This is a closure implemented by the users of the API. The input to this closure will be \n",
"```\n",
"Args:\n",
" 1] ids [batch_size, .. (index + 1 or 1 if padded_decode is True)],\n",
" 2] index [scalar] : current decoded step,\n",
" 3] cache [nested dictionary of tensors].\n",
"Returns:\n",
" 1] tensor for next-step logits [batch_size, vocab]\n",
" 2] the updated_cache [nested dictionary of tensors].\n",
"```\n",
"This closure calls the model to predict the logits for the 'index+1' step. The cache is used for faster decoding.\n",
"Here is a [reference](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/ops/beam_search_test.py#L88) implementation for the above closure.\n",
"\n",
"\n",
"\u003e **length_normalization_fn** : Closure for returning length normalization parameter.\n",
"```\n",
"Args: \n",
" 1] length : scalar for decoded step index.\n",
" 2] dtype : data-type of output tensor\n",
"Returns:\n",
" 1] value of length normalization factor.\n",
"Example :\n",
" def _length_norm(length, dtype):\n",
" return tf.pow(((5. + tf.cast(length, dtype)) / 6.), 0.0)\n",
"```\n",
"\n",
"\u003e **vocab_size** : Output vocabulary size.\n",
"\n",
"\u003e **max_decode_length** : Scalar for total number of decoding steps.\n",
"\n",
"\u003e **eos_id** : Decoding will stop if all output decoded ids in the batch have this ID.\n",
"\n",
"\u003e **padded_decode** : Set this to True if running on TPU. Tensors are padded to max_decoding_length if this is True.\n",
"\n",
"\u003e **top_k** : top_k is enabled if this value is \u003e 1.\n",
"\n",
"\u003e **top_p** : top_p is enabled if this value is \u003e 0 and \u003c 1.0\n",
"\n",
"\u003e **sampling_temperature** : This is used to re-estimate the softmax output. Temperature skews the distribution towards high-probability tokens and lowers the mass in the tail distribution. Value has to be positive. Low temperature is equivalent to greedy and makes the distribution sharper, while high temperature makes it flatter.\n",
"\n",
"\u003e **enable_greedy** : By default, this is true and greedy decoding is enabled.\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "lV1RRp6ihnGX"
},
"source": [
"## Initialize the Model Hyper-parameters"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "eTsGp2gaKLdE"
},
"outputs": [],
"source": [
"params = {\n",
" 'num_heads': 2,\n",
" 'num_layers': 2,\n",
" 'batch_size': 2,\n",
" 'n_dims': 256,\n",
" 'max_decode_length': 4}"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "CYXkoplAij01"
},
"source": [
"## Initialize cache. "
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "UGvmd0_dRFYI"
},
"source": [
"In auto-regressive architectures like Transformer based [Encoder-Decoder](https://arxiv.org/abs/1706.03762) models, \n",
"Cache is used for fast sequential decoding.\n",
"It is a nested dictionary storing pre-computed hidden-states (key and values in the self-attention blocks and the cross-attention blocks) for every layer."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "D6kfZOOKgkm1"
},
"outputs": [],
"source": [
"cache = {\n",
" 'layer_%d' % layer: {\n",
" 'k': tf.zeros(\n",
" shape=[params['batch_size'], params['max_decode_length'], params['num_heads'], params['n_dims'] // params['num_heads']],\n",
" dtype=tf.float32),\n",
" 'v': tf.zeros(\n",
" shape=[params['batch_size'], params['max_decode_length'], params['num_heads'], params['n_dims'] // params['num_heads']],\n",
" dtype=tf.float32)\n",
" } for layer in range(params['num_layers'])\n",
" }\n",
"print(\"cache value shape for layer 1 :\", cache['layer_1']['k'].shape)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "syl7I5nURPgW"
},
"source": [
"### Create model_fn\n",
" In practice, this will be replaced by an actual model implementation such as [here](https://github.com/tensorflow/models/blob/master/official/nlp/transformer/transformer.py#L236)\n",
"```\n",
"Args:\n",
"i : Step that is being decoded.\n",
"Returns:\n",
" logit probabilities of size [batch_size, 1, vocab_size]\n",
"```\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "AhzSkRisRdB6"
},
"outputs": [],
"source": [
"probabilities = tf.constant([[[0.3, 0.4, 0.3], [0.3, 0.3, 0.4],\n",
" [0.1, 0.1, 0.8], [0.1, 0.1, 0.8]],\n",
" [[0.2, 0.5, 0.3], [0.2, 0.7, 0.1],\n",
" [0.1, 0.1, 0.8], [0.1, 0.1, 0.8]]])\n",
"def model_fn(i):\n",
" return probabilities[:, i, :]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "FAJ4CpbfVdjr"
},
"outputs": [],
"source": [
"def _symbols_to_logits_fn():\n",
" \"\"\"Calculates logits of the next tokens.\"\"\"\n",
" def symbols_to_logits_fn(ids, i, temp_cache):\n",
" del ids\n",
" logits = tf.cast(tf.math.log(model_fn(i)), tf.float32)\n",
" return logits, temp_cache\n",
" return symbols_to_logits_fn"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "R_tV3jyWVL47"
},
"source": [
"## Greedy \n",
"Greedy decoding selects the token id with the highest probability as its next id: $id_t = argmax_{w}P(id | id_{1:t-1})$ at each timestep $t$. The following sketch shows greedy decoding. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "aGt9idSkVQEJ"
},
"outputs": [],
"source": [
"greedy_obj = sampling_module.SamplingModule(\n",
" length_normalization_fn=None,\n",
" dtype=tf.float32,\n",
" symbols_to_logits_fn=_symbols_to_logits_fn(),\n",
" vocab_size=3,\n",
" max_decode_length=params['max_decode_length'],\n",
" eos_id=10,\n",
" padded_decode=False)\n",
"ids, _ = greedy_obj.generate(\n",
" initial_ids=tf.constant([9, 1]), initial_cache=cache)\n",
"print(\"Greedy Decoded Ids:\", ids)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "s4pTTsQXVz5O"
},
"source": [
"## top_k sampling\n",
"In *Top-K* sampling, the *K* most likely next token ids are filtered and the probability mass is redistributed among only those *K* ids. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "pCLWIn6GV5_G"
},
"outputs": [],
"source": [
"top_k_obj = sampling_module.SamplingModule(\n",
" length_normalization_fn=length_norm,\n",
" dtype=tf.float32,\n",
" symbols_to_logits_fn=_symbols_to_logits_fn(),\n",
" vocab_size=3,\n",
" max_decode_length=params['max_decode_length'],\n",
" eos_id=10,\n",
" sample_temperature=tf.constant(1.0),\n",
" top_k=tf.constant(3),\n",
" padded_decode=False,\n",
" enable_greedy=False)\n",
"ids, _ = top_k_obj.generate(\n",
" initial_ids=tf.constant([9, 1]), initial_cache=cache)\n",
"print(\"top-k sampled Ids:\", ids)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "Jp3G-eE_WI4Y"
},
"source": [
"## top_p sampling\n",
"Instead of sampling only from the most likely *K* token ids, in *Top-p* sampling chooses from the smallest possible set of ids whose cumulative probability exceeds the probability *p*."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "rEGdIWcuWILO"
},
"outputs": [],
"source": [
"top_p_obj = sampling_module.SamplingModule(\n",
" length_normalization_fn=length_norm,\n",
" dtype=tf.float32,\n",
" symbols_to_logits_fn=_symbols_to_logits_fn(),\n",
" vocab_size=3,\n",
" max_decode_length=params['max_decode_length'],\n",
" eos_id=10,\n",
" sample_temperature=tf.constant(1.0),\n",
" top_p=tf.constant(0.9),\n",
" padded_decode=False,\n",
" enable_greedy=False)\n",
"ids, _ = top_p_obj.generate(\n",
" initial_ids=tf.constant([9, 1]), initial_cache=cache)\n",
"print(\"top-p sampled Ids:\", ids)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "2hcuyJ2VWjDz"
},
"source": [
"## Beam search decoding\n",
"Beam search reduces the risk of missing hidden high probability token ids by keeping the most likely num_beams of hypotheses at each time step and eventually choosing the hypothesis that has the overall highest probability. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "cJ3WzvSrWmSA"
},
"outputs": [],
"source": [
"beam_size = 2\n",
"params['batch_size'] = 1\n",
"beam_cache = {\n",
" 'layer_%d' % layer: {\n",
" 'k': tf.zeros([params['batch_size'], params['max_decode_length'], params['num_heads'], params['n_dims']], dtype=tf.float32),\n",
" 'v': tf.zeros([params['batch_size'], params['max_decode_length'], params['num_heads'], params['n_dims']], dtype=tf.float32)\n",
" } for layer in range(params['num_layers'])\n",
" }\n",
"print(\"cache key shape for layer 1 :\", beam_cache['layer_1']['k'].shape)\n",
"ids, _ = beam_search.sequence_beam_search(\n",
" symbols_to_logits_fn=_symbols_to_logits_fn(),\n",
" initial_ids=tf.constant([9], tf.int32),\n",
" initial_cache=beam_cache,\n",
" vocab_size=3,\n",
" beam_size=beam_size,\n",
" alpha=0.6,\n",
" max_decode_length=params['max_decode_length'],\n",
" eos_id=10,\n",
" padded_decode=False,\n",
" dtype=tf.float32)\n",
"print(\"Beam search ids:\", ids)"
]
}
],
"metadata": {
"accelerator": "GPU",
"colab": {
"collapsed_sections": [],
"name": "decoding_api_in_tf_nlp.ipynb",
"provenance": [],
"toc_visible": true
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
This diff is collapsed.
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "80xnUmoI7fBX"
},
"source": [
"##### Copyright 2020 The TensorFlow Authors."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "form",
"id": "8nvTnfs6Q692"
},
"outputs": [],
"source": [
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n",
"# you may not use this file except in compliance with the License.\n",
"# You may obtain a copy of the License at\n",
"#\n",
"# https://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"# See the License for the specific language governing permissions and\n",
"# limitations under the License."
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "WmfcMK5P5C1G"
},
"source": [
"# Introduction to the TensorFlow Models NLP library"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "cH-oJ8R6AHMK"
},
"source": [
"\u003ctable class=\"tfo-notebook-buttons\" align=\"left\"\u003e\n",
" \u003ctd\u003e\n",
" \u003ca target=\"_blank\" href=\"https://www.tensorflow.org/tfmodels/nlp\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" /\u003eView on TensorFlow.org\u003c/a\u003e\n",
" \u003c/td\u003e\n",
" \u003ctd\u003e\n",
" \u003ca target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/models/blob/master/docs/nlp/index.ipynb\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" /\u003eRun in Google Colab\u003c/a\u003e\n",
" \u003c/td\u003e\n",
" \u003ctd\u003e\n",
" \u003ca target=\"_blank\" href=\"https://github.com/tensorflow/models/blob/master/docs/nlp/index.ipynb\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" /\u003eView source on GitHub\u003c/a\u003e\n",
" \u003c/td\u003e\n",
" \u003ctd\u003e\n",
" \u003ca href=\"https://storage.googleapis.com/tensorflow_docs/models/docs/nlp/index.ipynb\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/download_logo_32px.png\" /\u003eDownload notebook\u003c/a\u003e\n",
" \u003c/td\u003e\n",
"\u003c/table\u003e"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "0H_EFIhq4-MJ"
},
"source": [
"## Learning objectives\n",
"\n",
"In this Colab notebook, you will learn how to build transformer-based models for common NLP tasks including pretraining, span labelling and classification using the building blocks from [NLP modeling library](https://github.com/tensorflow/models/tree/master/official/nlp/modeling)."
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "2N97-dps_nUk"
},
"source": [
"## Install and import"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "459ygAVl_rg0"
},
"source": [
"### Install the TensorFlow Model Garden pip package\n",
"\n",
"* `tf-models-official` is the stable Model Garden package. Note that it may not include the latest changes in the `tensorflow_models` github repo. To include latest changes, you may install `tf-models-nightly`,\n",
"which is the nightly Model Garden package created daily automatically.\n",
"* `pip` will install all models and dependencies automatically."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "Y-qGkdh6_sZc"
},
"outputs": [],
"source": [
"!pip install tf-models-official"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "e4huSSwyAG_5"
},
"source": [
"### Import Tensorflow and other libraries"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "jqYXqtjBAJd9"
},
"outputs": [],
"source": [
"import numpy as np\n",
"import tensorflow as tf\n",
"\n",
"from tensorflow_models import nlp"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "djBQWjvy-60Y"
},
"source": [
"## BERT pretraining model\n",
"\n",
"BERT ([Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805)) introduced the method of pre-training language representations on a large text corpus and then using that model for downstream NLP tasks.\n",
"\n",
"In this section, we will learn how to build a model to pretrain BERT on the masked language modeling task and next sentence prediction task. For simplicity, we only show the minimum example and use dummy data."
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "MKuHVlsCHmiq"
},
"source": [
"### Build a `BertPretrainer` model wrapping `BertEncoder`\n",
"\n",
"The `nlp.networks.BertEncoder` class implements the Transformer-based encoder as described in [BERT paper](https://arxiv.org/abs/1810.04805). It includes the embedding lookups and transformer layers (`nlp.layers.TransformerEncoderBlock`), but not the masked language model or classification task networks.\n",
"\n",
"The `nlp.models.BertPretrainer` class allows a user to pass in a transformer stack, and instantiates the masked language model and classification networks that are used to create the training objectives."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "EXkcXz-9BwB3"
},
"outputs": [],
"source": [
"# Build a small transformer network.\n",
"vocab_size = 100\n",
"network = nlp.networks.BertEncoder(\n",
" vocab_size=vocab_size, \n",
" # The number of TransformerEncoderBlock layers\n",
" num_layers=3)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "0NH5irV5KTMS"
},
"source": [
"Inspecting the encoder, we see it contains few embedding layers, stacked `nlp.layers.TransformerEncoderBlock` layers and are connected to three input layers:\n",
"\n",
"`input_word_ids`, `input_type_ids` and `input_mask`.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "lZNoZkBrIoff"
},
"outputs": [],
"source": [
"tf.keras.utils.plot_model(network, show_shapes=True, expand_nested=True, dpi=48)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "o7eFOZXiIl-b"
},
"outputs": [],
"source": [
"# Create a BERT pretrainer with the created network.\n",
"num_token_predictions = 8\n",
"bert_pretrainer = nlp.models.BertPretrainer(\n",
" network, num_classes=2, num_token_predictions=num_token_predictions, output='predictions')"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "d5h5HT7gNHx_"
},
"source": [
"Inspecting the `bert_pretrainer`, we see it wraps the `encoder` with additional `MaskedLM` and `nlp.layers.ClassificationHead` heads."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "2tcNfm03IBF7"
},
"outputs": [],
"source": [
"tf.keras.utils.plot_model(bert_pretrainer, show_shapes=True, expand_nested=True, dpi=48)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "F2oHrXGUIS0M"
},
"outputs": [],
"source": [
"# We can feed some dummy data to get masked language model and sentence output.\n",
"sequence_length = 16\n",
"batch_size = 2\n",
"\n",
"word_id_data = np.random.randint(vocab_size, size=(batch_size, sequence_length))\n",
"mask_data = np.random.randint(2, size=(batch_size, sequence_length))\n",
"type_id_data = np.random.randint(2, size=(batch_size, sequence_length))\n",
"masked_lm_positions_data = np.random.randint(2, size=(batch_size, num_token_predictions))\n",
"\n",
"outputs = bert_pretrainer(\n",
" [word_id_data, mask_data, type_id_data, masked_lm_positions_data])\n",
"lm_output = outputs[\"masked_lm\"]\n",
"sentence_output = outputs[\"classification\"]\n",
"print(f'lm_output: shape={lm_output.shape}, dtype={lm_output.dtype!r}')\n",
"print(f'sentence_output: shape={sentence_output.shape}, dtype={sentence_output.dtype!r}')"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "bnx3UCHniCS5"
},
"source": [
"### Compute loss\n",
"Next, we can use `lm_output` and `sentence_output` to compute `loss`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "k30H4Q86f52x"
},
"outputs": [],
"source": [
"masked_lm_ids_data = np.random.randint(vocab_size, size=(batch_size, num_token_predictions))\n",
"masked_lm_weights_data = np.random.randint(2, size=(batch_size, num_token_predictions))\n",
"next_sentence_labels_data = np.random.randint(2, size=(batch_size))\n",
"\n",
"mlm_loss = nlp.losses.weighted_sparse_categorical_crossentropy_loss(\n",
" labels=masked_lm_ids_data,\n",
" predictions=lm_output,\n",
" weights=masked_lm_weights_data)\n",
"sentence_loss = nlp.losses.weighted_sparse_categorical_crossentropy_loss(\n",
" labels=next_sentence_labels_data,\n",
" predictions=sentence_output)\n",
"loss = mlm_loss + sentence_loss\n",
"\n",
"print(loss)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "wrmSs8GjHxVw"
},
"source": [
"With the loss, you can optimize the model.\n",
"After training, we can save the weights of TransformerEncoder for the downstream fine-tuning tasks. Please see [run_pretraining.py](https://github.com/tensorflow/models/blob/master/official/legacy/bert/run_pretraining.py) for the full example.\n"
]
},
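{
"cell_type": "markdown",
"metadata": {},
"source": [
"As an illustration only (this is not the [run_pretraining.py](https://github.com/tensorflow/models/blob/master/official/legacy/bert/run_pretraining.py) pipeline), the next cell sketches a single optimization step on the combined loss, assuming a plain Keras Adam optimizer."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch of one gradient step; a real pipeline would loop over batches.\n",
"optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4)\n",
"\n",
"with tf.GradientTape() as tape:\n",
"  outputs = bert_pretrainer(\n",
"      [word_id_data, mask_data, type_id_data, masked_lm_positions_data])\n",
"  step_loss = (\n",
"      nlp.losses.weighted_sparse_categorical_crossentropy_loss(\n",
"          labels=masked_lm_ids_data,\n",
"          predictions=outputs['masked_lm'],\n",
"          weights=masked_lm_weights_data) +\n",
"      nlp.losses.weighted_sparse_categorical_crossentropy_loss(\n",
"          labels=next_sentence_labels_data,\n",
"          predictions=outputs['classification']))\n",
"\n",
"grads = tape.gradient(step_loss, bert_pretrainer.trainable_variables)\n",
"# Skip any variables that did not receive a gradient.\n",
"optimizer.apply_gradients(\n",
"    (g, v) for g, v in zip(grads, bert_pretrainer.trainable_variables)\n",
"    if g is not None)\n",
"print('loss for this step:', step_loss.numpy())"
]
},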
{
"cell_type": "markdown",
"metadata": {
"id": "k8cQVFvBCV4s"
},
"source": [
"## Span labeling model\n",
"\n",
"Span labeling is the task to assign labels to a span of the text, for example, label a span of text as the answer of a given question.\n",
"\n",
"In this section, we will learn how to build a span labeling model. Again, we use dummy data for simplicity."
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "xrLLEWpfknUW"
},
"source": [
"### Build a BertSpanLabeler wrapping BertEncoder\n",
"\n",
"The `nlp.models.BertSpanLabeler` class implements a simple single-span start-end predictor (that is, a model that predicts two values: a start token index and an end token index), suitable for SQuAD-style tasks.\n",
"\n",
"Note that `nlp.models.BertSpanLabeler` wraps a `nlp.networks.BertEncoder`, the weights of which can be restored from the above pretraining model.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "B941M4iUCejO"
},
"outputs": [],
"source": [
"network = nlp.networks.BertEncoder(\n",
" vocab_size=vocab_size, num_layers=2)\n",
"\n",
"# Create a BERT trainer with the created network.\n",
"bert_span_labeler = nlp.models.BertSpanLabeler(network)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "QpB9pgj4PpMg"
},
"source": [
"Inspecting the `bert_span_labeler`, we see it wraps the encoder with additional `SpanLabeling` that outputs `start_position` and `end_position`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "RbqRNJCLJu4H"
},
"outputs": [],
"source": [
"tf.keras.utils.plot_model(bert_span_labeler, show_shapes=True, expand_nested=True, dpi=48)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "fUf1vRxZJwio"
},
"outputs": [],
"source": [
"# Create a set of 2-dimensional data tensors to feed into the model.\n",
"word_id_data = np.random.randint(vocab_size, size=(batch_size, sequence_length))\n",
"mask_data = np.random.randint(2, size=(batch_size, sequence_length))\n",
"type_id_data = np.random.randint(2, size=(batch_size, sequence_length))\n",
"\n",
"# Feed the data to the model.\n",
"start_logits, end_logits = bert_span_labeler([word_id_data, mask_data, type_id_data])\n",
"\n",
"print(f'start_logits: shape={start_logits.shape}, dtype={start_logits.dtype!r}')\n",
"print(f'end_logits: shape={end_logits.shape}, dtype={end_logits.dtype!r}')"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "WqhgQaN1lt-G"
},
"source": [
"### Compute loss\n",
"With `start_logits` and `end_logits`, we can compute loss:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "waqs6azNl3Nn"
},
"outputs": [],
"source": [
"start_positions = np.random.randint(sequence_length, size=(batch_size))\n",
"end_positions = np.random.randint(sequence_length, size=(batch_size))\n",
"\n",
"start_loss = tf.keras.losses.sparse_categorical_crossentropy(\n",
" start_positions, start_logits, from_logits=True)\n",
"end_loss = tf.keras.losses.sparse_categorical_crossentropy(\n",
" end_positions, end_logits, from_logits=True)\n",
"\n",
"total_loss = (tf.reduce_mean(start_loss) + tf.reduce_mean(end_loss)) / 2\n",
"print(total_loss)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "Zdf03YtZmd_d"
},
"source": [
"With the `loss`, you can optimize the model. Please see [run_squad.py](https://github.com/tensorflow/models/blob/master/official/legacy/bert/run_squad.py) for the full example."
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "0A1XnGSTChg9"
},
"source": [
"## Classification model\n",
"\n",
"In the last section, we show how to build a text classification model.\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "MSK8OpZgnQa9"
},
"source": [
"### Build a BertClassifier model wrapping BertEncoder\n",
"\n",
"`nlp.models.BertClassifier` implements a [CLS] token classification model containing a single classification head."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "cXXCsffkCphk"
},
"outputs": [],
"source": [
"network = nlp.networks.BertEncoder(\n",
" vocab_size=vocab_size, num_layers=2)\n",
"\n",
"# Create a BERT trainer with the created network.\n",
"num_classes = 2\n",
"bert_classifier = nlp.models.BertClassifier(\n",
" network, num_classes=num_classes)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "8tZKueKYP4bB"
},
"source": [
"Inspecting the `bert_classifier`, we see it wraps the `encoder` with additional `Classification` head."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "snlutm9ZJgEZ"
},
"outputs": [],
"source": [
"tf.keras.utils.plot_model(bert_classifier, show_shapes=True, expand_nested=True, dpi=48)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "yyHPHsqBJkCz"
},
"outputs": [],
"source": [
"# Create a set of 2-dimensional data tensors to feed into the model.\n",
"word_id_data = np.random.randint(vocab_size, size=(batch_size, sequence_length))\n",
"mask_data = np.random.randint(2, size=(batch_size, sequence_length))\n",
"type_id_data = np.random.randint(2, size=(batch_size, sequence_length))\n",
"\n",
"# Feed the data to the model.\n",
"logits = bert_classifier([word_id_data, mask_data, type_id_data])\n",
"print(f'logits: shape={logits.shape}, dtype={logits.dtype!r}')"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "w--a2mg4nzKm"
},
"source": [
"### Compute loss\n",
"\n",
"With `logits`, we can compute `loss`:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "9X0S1DoFn_5Q"
},
"outputs": [],
"source": [
"labels = np.random.randint(num_classes, size=(batch_size))\n",
"\n",
"loss = tf.keras.losses.sparse_categorical_crossentropy(\n",
" labels, logits, from_logits=True)\n",
"print(loss)"
]
},
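{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a toy illustration only (it reuses the random dummy tensors above, not a real dataset), the classifier can also be trained directly with the standard Keras `compile`/`fit` API."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustration only: one epoch of Keras training on the dummy data.\n",
"bert_classifier.compile(\n",
"    optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5),\n",
"    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))\n",
"bert_classifier.fit(\n",
"    x=[word_id_data, mask_data, type_id_data],\n",
"    y=labels,\n",
"    batch_size=batch_size,\n",
"    epochs=1)"
]
},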
{
"cell_type": "markdown",
"metadata": {
"id": "mzBqOylZo3og"
},
"source": [
"With the `loss`, you can optimize the model. Please see the [Fine-tune BERT](https://www.tensorflow.org/text/tutorials/fine_tune_bert) notebook or the [model training documentation](https://github.com/tensorflow/models/blob/master/official/nlp/docs/train.md) for the full example."
]
}
],
"metadata": {
"colab": {
"name": "nlp_modeling_library_intro.ipynb",
"provenance": [],
"toc_visible": true
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
This diff is collapsed.
This diff is collapsed.
toc:
- title: "Example: Image classification"
path: /tfmodels/vision/image_classification
- title: "Example: Object Detection"
path: /tfmodels/vision/object_detection
- title: "Example: Semantic Segmentation"
path: /tfmodels/vision/semantic_segmentation
- title: "Example: Instance Segmentation"
path: /tfmodels/vision/instance_segmentation
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
# Officially Supported TensorFlow 2.1+ Models on Cloud TPU
## Natural Language Processing
* [bert](nlp/bert): A powerful pre-trained language representation model:
  BERT, which stands for Bidirectional Encoder Representations from
  Transformers.
  [BERT FineTuning with Cloud TPU](https://cloud.google.com/tpu/docs/tutorials/bert-2.x) provides step-by-step instructions on Cloud TPU training. You can look at the [BERT MNLI Tensorboard.dev metrics](https://tensorboard.dev/experiment/LijZ1IrERxKALQfr76gndA) for the MNLI fine-tuning task.
* [transformer](nlp/transformer): A transformer model to translate the WMT
  English-to-German dataset.
  See [Training transformer on Cloud TPU](https://cloud.google.com/tpu/docs/tutorials/transformer-2.x) for step-by-step instructions on Cloud TPU training.
## Computer Vision
* [efficientnet](vision/image_classification): A family of convolutional
neural networks that scale by balancing network depth, width, and
resolution and can be used to classify ImageNet's dataset of 1000 classes.
See [Tensorboard.dev training metrics](https://tensorboard.dev/experiment/KnaWjrq5TXGfv0NW5m7rpg/#scalars).
* [mnist](vision/image_classification): A basic model to classify digits
from the MNIST dataset. See [Running MNIST on Cloud TPU](https://cloud.google.com/tpu/docs/tutorials/mnist-2.x) tutorial and [Tensorboard.dev metrics](https://tensorboard.dev/experiment/mIah5lppTASvrHqWrdr6NA).
* [mask-rcnn](vision/detection): An object detection and instance segmentation model. See [Tensorboard.dev training metrics](https://tensorboard.dev/experiment/LH7k0fMsRwqUAcE09o9kPA).
* [resnet](vision/image_classification): A deep residual network that can
be used to classify ImageNet's dataset of 1000 classes.
See [Training ResNet on Cloud TPU](https://cloud.google.com/tpu/docs/tutorials/resnet-2.x) tutorial and [Tensorboard.dev metrics](https://tensorboard.dev/experiment/CxlDK8YMRrSpYEGtBRpOhg).
* [retinanet](vision/detection): A fast and powerful object detector. See [Tensorboard.dev training metrics](https://tensorboard.dev/experiment/b8NRnWU3TqG6Rw0UxueU6Q).
* [shapemask](vision/detection): An object detection and instance segmentation model using shape priors. See [Tensorboard.dev training metrics](https://tensorboard.dev/experiment/ZbXgVoc6Rf6mBRlPj0JpLA).
## Recommendation
* [dlrm](recommendation/ranking): [Deep Learning Recommendation Model for
Personalization and Recommendation Systems](https://arxiv.org/abs/1906.00091).
* [dcn v2](recommendation/ranking): [Improved Deep & Cross Network and Practical Lessons for Web-scale Learning to Rank Systems](https://arxiv.org/abs/2008.13535).
* [ncf](recommendation): Neural Collaborative Filtering. See [Tensorboard.dev training metrics](https://tensorboard.dev/experiment/0k3gKjZlR1ewkVTRyLB6IQ).
This diff is collapsed.
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
This diff is collapsed.
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model garden benchmark definitions."""
# tf-vision benchmarks
IMAGE_CLASSIFICATION_BENCHMARKS = {
'image_classification.resnet50.tpu.4x4.bf16':
dict(
experiment_type='resnet_imagenet',
platform='tpu.4x4',
precision='bfloat16',
metric_bounds=[{
'name': 'accuracy',
'min_value': 0.76,
'max_value': 0.77
}],
config_files=[('official/vision/configs/experiments/'
'image_classification/imagenet_resnet50_tpu.yaml')]),
'image_classification.resnet50.gpu.8.fp16':
dict(
experiment_type='resnet_imagenet',
platform='gpu.8',
precision='float16',
metric_bounds=[{
'name': 'accuracy',
'min_value': 0.76,
'max_value': 0.77
}],
config_files=[('official/vision/configs/experiments/'
'image_classification/imagenet_resnet50_gpu.yaml')])
}
VISION_BENCHMARKS = {
'image_classification': IMAGE_CLASSIFICATION_BENCHMARKS,
}
NLP_BENCHMARKS = {
}
QAT_BENCHMARKS = {
}
TENSOR_TRACER_BENCHMARKS = {
}
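# A hypothetical helper (illustration only, not part of the original file)
# showing how the definitions above could be consumed: look up a benchmark
# and check a reported metric against its configured bounds.
def metric_within_bounds(benchmark_name, metric_name, value):
  """Returns True if `value` lies inside the bounds for `metric_name`."""
  definition = IMAGE_CLASSIFICATION_BENCHMARKS[benchmark_name]
  for bound in definition['metric_bounds']:
    if bound['name'] == metric_name:
      return bound['min_value'] <= value <= bound['max_value']
  raise KeyError('No bounds configured for metric %r' % metric_name)

# For example, with the definitions above this returns True:
# metric_within_bounds(
#     'image_classification.resnet50.tpu.4x4.bf16', 'accuracy', 0.765)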
This diff is collapsed.
This diff is collapsed.