Commit 855d29db authored by Sara Beery, committed by TF Object Detection Team

Converting Context R-CNN dataset tools to TF2

PiperOrigin-RevId: 323805090
parent a565d720
@@ -53,7 +53,7 @@ import os
import numpy as np
import PIL.Image
import six
import tensorflow.compat.v1 as tf
import tensorflow as tf
try:
import apache_beam as beam # pylint:disable=g-import-not-at-top
......
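The import switch above is the heart of the conversion: under a plain "import tensorflow as tf" on TF 2.x, eager execution is on by default, so tensors evaluate immediately instead of through a tf.Session. A minimal sketch of that difference, assuming TF 2.x is installed:

# Minimal sketch: in TF2 the default mode is eager, so ops run immediately.
import tensorflow as tf

assert tf.executing_eagerly()
value = tf.constant([1, 2, 3]) * 2
print(value.numpy())  # array([2, 4, 6], dtype=int32)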
@@ -25,11 +25,12 @@ import unittest
import numpy as np
import six
import tensorflow.compat.v1 as tf
import tensorflow as tf
from object_detection.dataset_tools.context_rcnn import add_context_to_examples
from object_detection.utils import tf_version
if tf_version.is_tf2():
from object_detection.dataset_tools.context_rcnn import add_context_to_examples # pylint:disable=g-import-not-at-top
try:
import apache_beam as beam # pylint:disable=g-import-not-at-top
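The guarded import above lets the test module load even when the installed TensorFlow is 1.x: the TF2-only tool is imported only after tf_version.is_tf2() confirms the runtime, and the class-level skipIf below then disables the tests entirely under TF1. A minimal sketch of the same gating pattern, using a hypothetical local helper in place of object_detection.utils.tf_version:

import unittest
import tensorflow as tf


def is_tf2():
  # Hypothetical stand-in for object_detection.utils.tf_version.is_tf2().
  return tf.version.VERSION.startswith('2')


if is_tf2():
  # TF2-only modules would be imported here, mirroring the guarded import
  # above; under TF1 this block is skipped so the module still imports cleanly.
  tf2_only_tools_available = True
else:
  tf2_only_tools_available = False


@unittest.skipIf(not is_tf2(), 'Skipping TF2.X only test.')
class GatedTest(unittest.TestCase):

  def test_runtime_is_eager(self):
    self.assertTrue(tf.executing_eagerly())


if __name__ == '__main__':
  unittest.main()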
@@ -42,7 +43,7 @@ def InMemoryTFRecord(entries):
temp = tempfile.NamedTemporaryFile(delete=False)
filename = temp.name
try:
with tf.python_io.TFRecordWriter(filename) as writer:
with tf.io.TFRecordWriter(filename) as writer:
for value in entries:
writer.write(value)
yield filename
@@ -70,13 +71,12 @@ def FloatListFeature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class GenerateContextDataTest(tf.test.TestCase):
def _create_first_tf_example(self):
with self.test_session():
encoded_image = tf.image.encode_jpeg(
tf.constant(np.ones((4, 4, 3)).astype(np.uint8))).eval()
encoded_image = tf.io.encode_jpeg(
tf.constant(np.ones((4, 4, 3)).astype(np.uint8))).numpy()
example = tf.train.Example(features=tf.train.Features(feature={
'image/encoded': BytesFeature(encoded_image),
@@ -105,9 +105,8 @@ class GenerateContextDataTest(tf.test.TestCase):
return example.SerializeToString()
def _create_second_tf_example(self):
with self.test_session():
encoded_image = tf.image.encode_jpeg(
tf.constant(np.ones((4, 4, 3)).astype(np.uint8))).eval()
encoded_image = tf.io.encode_jpeg(
tf.constant(np.ones((4, 4, 3)).astype(np.uint8))).numpy()
example = tf.train.Example(features=tf.train.Features(feature={
'image/encoded': BytesFeature(encoded_image),
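Both example-builder helpers above drop the test_session()/.eval() dance: in eager mode tf.io.encode_jpeg returns a concrete tensor whose bytes come straight from .numpy(). A minimal standalone sketch of that pattern (the feature wrapper below stands in for the test's BytesFeature helper):

import numpy as np
import tensorflow as tf

# Encode a tiny dummy image eagerly; .numpy() yields the raw JPEG bytes that
# previously required a Session and .eval() in the TF1 version of the test.
image = np.ones((4, 4, 3), dtype=np.uint8)
encoded_image = tf.io.encode_jpeg(tf.constant(image)).numpy()

example = tf.train.Example(features=tf.train.Features(feature={
    'image/encoded': tf.train.Feature(
        bytes_list=tf.train.BytesList(value=[encoded_image])),
}))
serialized = example.SerializeToString()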
@@ -353,7 +352,8 @@ class GenerateContextDataTest(tf.test.TestCase):
p.run()
filenames = tf.io.gfile.glob(output_tfrecord + '-?????-of-?????')
actual_output = []
record_iterator = tf.python_io.tf_record_iterator(path=filenames[0])
record_iterator = tf.data.TFRecordDataset(
tf.convert_to_tensor(filenames)).as_numpy_iterator()
for record in record_iterator:
actual_output.append(record)
self.assertEqual(len(actual_output), 2)
@@ -383,8 +383,8 @@ class GenerateContextDataTest(tf.test.TestCase):
p.run()
filenames = tf.io.gfile.glob(output_tfrecord + '-?????-of-?????')
actual_output = []
record_iterator = tf.python_io.tf_record_iterator(
path=filenames[0])
record_iterator = tf.data.TFRecordDataset(
tf.convert_to_tensor(filenames)).as_numpy_iterator()
for record in record_iterator:
actual_output.append(record)
self.assertEqual(len(actual_output), 1)
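The assertions above read the Beam output back with tf.data instead of the removed tf.python_io.tf_record_iterator. A minimal sketch of the replacement, assuming a sharded TFRecord output like the one produced by the pipeline (the glob pattern below is a placeholder):

import tensorflow as tf

# tf.data.TFRecordDataset accepts a list (or tensor) of filenames and, in
# eager mode, as_numpy_iterator() yields each serialized record as bytes.
filenames = tf.io.gfile.glob('/tmp/output_tfrecord-?????-of-?????')
records = list(tf.data.TFRecordDataset(filenames).as_numpy_iterator())

for serialized in records:
  example = tf.train.Example.FromString(serialized)

Passing the Python list of filenames directly also works; the tf.convert_to_tensor wrapper used in the diff is an equivalent spelling.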
......
@@ -37,11 +37,10 @@ import argparse
import hashlib
import io
import json
import logging
import os
import numpy as np
import PIL.Image
import tensorflow.compat.v1 as tf
import tensorflow as tf
from object_detection.utils import dataset_util
try:
@@ -110,16 +109,9 @@ class ParseImage(beam.DoFn):
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = PIL.Image.open(encoded_jpg_io)
# Ensure the image can be read by tf
with tf.Graph().as_default():
image = tf.image.decode_jpeg(encoded_jpg, channels=3)
init_op = tf.initialize_all_tables()
with tf.Session() as sess:
sess.run(init_op)
sess.run(image)
except Exception as e: # pylint: disable=broad-except
image = tf.io.decode_jpeg(encoded_jpg, channels=3)
except Exception: # pylint: disable=broad-except
# The image file is missing or corrupt
tf.logging.error(str(e))
return []
key = hashlib.sha256(encoded_jpg).hexdigest()
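The image sanity check above shrinks to a single eager call: tf.io.decode_jpeg raises immediately on a corrupt buffer, so no graph, table initializer, or Session is needed. A minimal standalone sketch of that check:

import tensorflow as tf


def is_readable_jpeg(encoded_jpg):
  """Returns True if TensorFlow can decode the given JPEG bytes."""
  try:
    tf.io.decode_jpeg(encoded_jpg, channels=3)  # runs eagerly, raises if corrupt
    return True
  except Exception:  # pylint: disable=broad-except
    return False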
@@ -257,8 +249,6 @@ def create_pipeline(pipeline,
keep_bboxes: Whether to keep any bounding boxes that exist in the json file
"""
logging.info('Reading data from COCO-CameraTraps Dataset.')
data = load_json_data(input_annotations_file)
num_shards = int(np.ceil(float(len(data['images']))/num_images_per_shard))
......
@@ -25,17 +25,19 @@ import unittest
import numpy as np
from PIL import Image
import tensorflow.compat.v1 as tf
from object_detection.dataset_tools.context_rcnn import create_cococameratraps_tfexample_main
import tensorflow as tf
from object_detection.utils import tf_version
if tf_version.is_tf2():
from object_detection.dataset_tools.context_rcnn import create_cococameratraps_tfexample_main # pylint:disable=g-import-not-at-top
try:
import apache_beam as beam # pylint:disable=g-import-not-at-top
except ModuleNotFoundError:
pass
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class CreateCOCOCameraTrapsTfexampleTest(tf.test.TestCase):
IMAGE_HEIGHT = 360
@@ -175,7 +177,8 @@ class CreateCOCOCameraTrapsTfexampleTest(tf.test.TestCase):
p.run()
filenames = tf.io.gfile.glob(output_tfrecord + '-?????-of-?????')
actual_output = []
record_iterator = tf.python_io.tf_record_iterator(path=filenames[0])
record_iterator = tf.data.TFRecordDataset(
tf.convert_to_tensor(filenames)).as_numpy_iterator()
for record in record_iterator:
actual_output.append(record)
self.assertEqual(len(actual_output), num_frames)
@@ -198,7 +201,8 @@ class CreateCOCOCameraTrapsTfexampleTest(tf.test.TestCase):
p.run()
filenames = tf.io.gfile.glob(output_tfrecord+'-?????-of-?????')
actual_output = []
record_iterator = tf.python_io.tf_record_iterator(path=filenames[0])
record_iterator = tf.data.TFRecordDataset(
tf.convert_to_tensor(filenames)).as_numpy_iterator()
for record in record_iterator:
actual_output.append(record)
self.assertEqual(len(actual_output), num_frames)
......
@@ -48,7 +48,8 @@ from __future__ import print_function
import argparse
import os
import threading
import tensorflow.compat.v1 as tf
import tensorflow as tf
try:
import apache_beam as beam # pylint:disable=g-import-not-at-top
except ModuleNotFoundError:
@@ -85,22 +86,7 @@ class GenerateDetectionDataFn(beam.DoFn):
# one instance across all threads in the worker. This is possible since
# tf.Session.run() is thread safe.
with self.session_lock:
if self._session is None:
graph = tf.Graph()
self._session = tf.Session(graph=graph)
with graph.as_default():
meta_graph = tf.saved_model.loader.load(
self._session, [tf.saved_model.tag_constants.SERVING],
self._model_dir)
signature = meta_graph.signature_def['serving_default']
input_tensor_name = signature.inputs['inputs'].name
self._input = graph.get_tensor_by_name(input_tensor_name)
self._boxes_node = graph.get_tensor_by_name(
signature.outputs['detection_boxes'].name)
self._scores_node = graph.get_tensor_by_name(
signature.outputs['detection_scores'].name)
self._num_detections_node = graph.get_tensor_by_name(
signature.outputs['num_detections'].name)
self._detect_fn = tf.saved_model.load(self._model_dir)
def process(self, tfrecord_entry):
return self._run_inference_and_generate_detections(tfrecord_entry)
@@ -112,9 +98,11 @@ class GenerateDetectionDataFn(beam.DoFn):
# There are already ground truth boxes for this image, just keep them.
return [input_example]
detection_boxes, detection_scores, num_detections = self._session.run(
[self._boxes_node, self._scores_node, self._num_detections_node],
feed_dict={self._input: [tfrecord_entry]})
detections = self._detect_fn.signatures['serving_default'](
(tf.expand_dims(tf.convert_to_tensor(tfrecord_entry), 0)))
detection_boxes = detections['detection_boxes']
num_detections = detections['num_detections']
detection_scores = detections['detection_scores']
example = tf.train.Example()
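The inference path above replaces Session.run with a direct call into the loaded SavedModel's serving signature. A minimal sketch of that flow, assuming the model was exported by the TF2 Object Detection exporter with input_type 'tf_example' (the path and example bytes below are placeholders):

import tensorflow as tf

detect_fn = tf.saved_model.load('/path/to/exported/saved_model')  # placeholder path
serialized_example = b'...'  # one serialized tf.train.Example (placeholder)

# The serving signature takes a batch of serialized examples, so add a batch dim.
detections = detect_fn.signatures['serving_default'](
    tf.expand_dims(tf.convert_to_tensor(serialized_example), 0))

detection_boxes = detections['detection_boxes'].numpy()
detection_scores = detections['detection_scores'].numpy()
num_detections = int(detections['num_detections'][0])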
......
@@ -24,15 +24,17 @@ import tempfile
import unittest
import numpy as np
import six
import tensorflow.compat.v1 as tf
import tensorflow as tf
from object_detection import exporter
from object_detection import exporter_lib_v2
from object_detection.builders import model_builder
from object_detection.core import model
from object_detection.dataset_tools.context_rcnn import generate_detection_data
from object_detection.protos import pipeline_pb2
from object_detection.utils import tf_version
if tf_version.is_tf2():
from object_detection.dataset_tools.context_rcnn import generate_detection_data # pylint:disable=g-import-not-at-top
if six.PY2:
import mock # pylint: disable=g-import-not-at-top
else:
@@ -45,17 +47,23 @@ except ModuleNotFoundError:
class FakeModel(model.DetectionModel):
"""A Fake Detection model with expected output nodes from post-processing."""
def __init__(self, conv_weight_scalar=1.0):
super(FakeModel, self).__init__(num_classes=5)
self._conv = tf.keras.layers.Conv2D(
filters=1, kernel_size=1, strides=(1, 1), padding='valid',
kernel_initializer=tf.keras.initializers.Constant(
value=conv_weight_scalar))
def preprocess(self, inputs):
true_image_shapes = [] # Doesn't matter for the fake model.
return tf.identity(inputs), true_image_shapes
def predict(self, preprocessed_inputs, true_image_shapes):
return {'image': tf.layers.conv2d(preprocessed_inputs, 3, 1)}
return {'image': self._conv(preprocessed_inputs)}
def postprocess(self, prediction_dict, true_image_shapes):
with tf.control_dependencies(prediction_dict.values()):
with tf.control_dependencies(list(prediction_dict.values())):
postprocessed_tensors = {
'detection_boxes': tf.constant([[[0.0, 0.1, 0.5, 0.6],
[0.5, 0.5, 0.8, 0.8]]], tf.float32),
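FakeModel now builds its convolution once in __init__ as a Keras layer, so the variables hang off the model object and can be tracked by object-based checkpointing; the removed tf.layers.conv2d call no longer exists in TF2. A minimal sketch of the same idea outside the detection API (TinyModel is a hypothetical stand-in, not part of the repo):

import tensorflow as tf


class TinyModel(tf.Module):

  def __init__(self, conv_weight_scalar=1.0):
    super().__init__()
    # The layer (and its variables) live on the object, so checkpoints and
    # SavedModels can track them automatically.
    self._conv = tf.keras.layers.Conv2D(
        filters=1, kernel_size=1, strides=(1, 1), padding='valid',
        kernel_initializer=tf.keras.initializers.Constant(conv_weight_scalar))

  def __call__(self, images):
    return self._conv(images)


model = TinyModel()
out = model(tf.zeros([1, 10, 10, 3]))  # variables are created on first call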
@@ -89,7 +97,7 @@ def InMemoryTFRecord(entries):
temp = tempfile.NamedTemporaryFile(delete=False)
filename = temp.name
try:
with tf.python_io.TFRecordWriter(filename) as writer:
with tf.io.TFRecordWriter(filename) as writer:
for value in entries:
writer.write(value)
yield filename
@@ -97,7 +105,7 @@ def InMemoryTFRecord(entries):
os.unlink(filename)
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class GenerateDetectionDataTest(tf.test.TestCase):
def _save_checkpoint_from_mock_model(self, checkpoint_path):
@@ -106,64 +114,39 @@ class GenerateDetectionDataTest(tf.test.TestCase):
Args:
checkpoint_path: Path to save checkpoint from Fake model.
"""
g = tf.Graph()
with g.as_default():
mock_model = FakeModel(num_classes=5)
preprocessed_inputs, true_image_shapes = mock_model.preprocess(
tf.placeholder(tf.float32, shape=[None, None, None, 3]))
predictions = mock_model.predict(preprocessed_inputs, true_image_shapes)
mock_model.postprocess(predictions, true_image_shapes)
tf.train.get_or_create_global_step()
saver = tf.train.Saver()
init = tf.global_variables_initializer()
with self.test_session(graph=g) as sess:
sess.run(init)
saver.save(sess, checkpoint_path)
mock_model = FakeModel()
fake_image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32)
preprocessed_inputs, true_image_shapes = mock_model.preprocess(fake_image)
predictions = mock_model.predict(preprocessed_inputs, true_image_shapes)
mock_model.postprocess(predictions, true_image_shapes)
ckpt = tf.train.Checkpoint(model=mock_model)
exported_checkpoint_manager = tf.train.CheckpointManager(
ckpt, checkpoint_path, max_to_keep=1)
exported_checkpoint_manager.save(checkpoint_number=0)
def _export_saved_model(self):
tmp_dir = self.get_temp_dir()
checkpoint_path = os.path.join(tmp_dir, 'model.ckpt')
self._save_checkpoint_from_mock_model(checkpoint_path)
self._save_checkpoint_from_mock_model(tmp_dir)
output_directory = os.path.join(tmp_dir, 'output')
saved_model_path = os.path.join(output_directory, 'saved_model')
tf.io.gfile.makedirs(output_directory)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel(num_classes=5)
mock_builder.return_value = FakeModel()
output_directory = os.path.join(tmp_dir, 'output')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
detection_model = model_builder.build(pipeline_config.model,
is_training=False)
outputs, placeholder_tensor = exporter.build_detection_graph(
exporter_lib_v2.export_inference_graph(
input_type='tf_example',
detection_model=detection_model,
input_shape=None,
output_collection_name='inference_op',
graph_hook_fn=None)
output_node_names = ','.join(outputs.keys())
saver = tf.train.Saver()
input_saver_def = saver.as_saver_def()
frozen_graph_def = exporter.freeze_graph_with_def_protos(
input_graph_def=tf.get_default_graph().as_graph_def(),
input_saver_def=input_saver_def,
input_checkpoint=checkpoint_path,
output_node_names=output_node_names,
restore_op_name='save/restore_all',
filename_tensor_name='save/Const:0',
output_graph='',
clear_devices=True,
initializer_nodes='')
exporter.write_saved_model(
saved_model_path=saved_model_path,
frozen_graph_def=frozen_graph_def,
inputs=placeholder_tensor,
outputs=outputs)
return saved_model_path
pipeline_config=pipeline_config,
trained_checkpoint_dir=tmp_dir,
output_directory=output_directory)
saved_model_path = os.path.join(output_directory, 'saved_model')
return saved_model_path
def _create_tf_example(self):
with self.test_session():
encoded_image = tf.image.encode_jpeg(
tf.constant(np.ones((4, 6, 3)).astype(np.uint8))).eval()
encoded_image = tf.io.encode_jpeg(
tf.constant(np.ones((4, 6, 3)).astype(np.uint8))).numpy()
def BytesFeature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
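The mock-model checkpoint in this test is now written with object-based checkpointing: tf.train.Checkpoint records every variable reachable from the tracked model, and CheckpointManager handles numbering and pruning, replacing tf.train.Saver and the global-variables initializer. A minimal sketch with a stand-in Keras model (the directory is a placeholder):

import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
model(tf.zeros([1, 3]))  # build the variables before saving

ckpt = tf.train.Checkpoint(model=model)
manager = tf.train.CheckpointManager(ckpt, '/tmp/fake_model_ckpt', max_to_keep=1)
save_path = manager.save(checkpoint_number=0)

# Restoring only needs an object graph with the same structure.
restored_model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
restored_model(tf.zeros([1, 3]))
tf.train.Checkpoint(model=restored_model).restore(save_path)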
@@ -264,7 +247,8 @@ class GenerateDetectionDataTest(tf.test.TestCase):
p.run()
filenames = tf.io.gfile.glob(output_tfrecord + '-?????-of-?????')
actual_output = []
record_iterator = tf.python_io.tf_record_iterator(path=filenames[0])
record_iterator = tf.data.TFRecordDataset(
tf.convert_to_tensor(filenames)).as_numpy_iterator()
for record in record_iterator:
actual_output.append(record)
self.assertEqual(len(actual_output), 1)
......
@@ -55,7 +55,7 @@ import threading
import numpy as np
import six
import tensorflow.compat.v1 as tf
import tensorflow as tf
try:
import apache_beam as beam # pylint:disable=g-import-not-at-top
@@ -95,27 +95,7 @@ class GenerateEmbeddingDataFn(beam.DoFn):
# one instance across all threads in the worker. This is possible since
# tf.Session.run() is thread safe.
with self.session_lock:
if self._session is None:
graph = tf.Graph()
self._session = tf.Session(graph=graph)
with graph.as_default():
meta_graph = tf.saved_model.loader.load(
self._session, [tf.saved_model.tag_constants.SERVING],
self._model_dir)
signature = meta_graph.signature_def['serving_default']
input_tensor_name = signature.inputs['inputs'].name
detection_features_name = signature.outputs['detection_features'].name
detection_boxes_name = signature.outputs['detection_boxes'].name
num_detections_name = signature.outputs['num_detections'].name
self._input = graph.get_tensor_by_name(input_tensor_name)
self._embedding_node = graph.get_tensor_by_name(detection_features_name)
self._box_node = graph.get_tensor_by_name(detection_boxes_name)
self._scores_node = graph.get_tensor_by_name(
signature.outputs['detection_scores'].name)
self._num_detections = graph.get_tensor_by_name(num_detections_name)
tf.logging.info(signature.outputs['detection_features'].name)
tf.logging.info(signature.outputs['detection_boxes'].name)
tf.logging.info(signature.outputs['num_detections'].name)
self._detect_fn = tf.saved_model.load(self._model_dir)
def process(self, tfrecord_entry):
return self._run_inference_and_generate_embedding(tfrecord_entry)
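In the embedding pipeline the whole TF1 signature and tensor-name lookup collapses to a single tf.saved_model.load, still guarded by the per-DoFn lock so that concurrent Beam threads do not load the model twice. A minimal sketch of that lazy, lock-guarded setup (the class and the None-check are hypothetical simplifications of the DoFn above):

import threading
import tensorflow as tf


class EmbeddingInferenceFn(object):
  """Hypothetical stand-in for the Beam DoFn above."""

  def __init__(self, model_dir):
    self._model_dir = model_dir
    self._detect_fn = None
    self._lock = threading.Lock()

  def setup(self):
    # Load the SavedModel once per worker; the lock keeps threads from racing.
    with self._lock:
      if self._detect_fn is None:
        self._detect_fn = tf.saved_model.load(self._model_dir)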
@@ -184,13 +164,12 @@ class GenerateEmbeddingDataFn(beam.DoFn):
example.features.feature['image/unix_time'].float_list.value.extend(
[unix_time])
(detection_features, detection_boxes, num_detections,
detection_scores) = self._session.run(
[
self._embedding_node, self._box_node, self._num_detections[0],
self._scores_node
],
feed_dict={self._input: [tfrecord_entry]})
detections = self._detect_fn.signatures['serving_default'](
(tf.expand_dims(tf.convert_to_tensor(tfrecord_entry), 0)))
detection_features = detections['detection_features']
detection_boxes = detections['detection_boxes']
num_detections = detections['num_detections']
detection_scores = detections['detection_scores']
num_detections = int(num_detections)
embed_all = []
......
@@ -23,14 +23,15 @@ import tempfile
import unittest
import numpy as np
import six
import tensorflow.compat.v1 as tf
from object_detection import exporter
import tensorflow as tf
from object_detection import exporter_lib_v2
from object_detection.builders import model_builder
from object_detection.core import model
from object_detection.dataset_tools.context_rcnn import generate_embedding_data
from object_detection.protos import pipeline_pb2
from object_detection.utils import tf_version
if tf_version.is_tf2():
from object_detection.dataset_tools.context_rcnn import generate_embedding_data # pylint:disable=g-import-not-at-top
if six.PY2:
import mock # pylint: disable=g-import-not-at-top
@@ -44,14 +45,20 @@ except ModuleNotFoundError:
class FakeModel(model.DetectionModel):
"""A Fake Detection model with expected output nodes from post-processing."""
def __init__(self, conv_weight_scalar=1.0):
super(FakeModel, self).__init__(num_classes=5)
self._conv = tf.keras.layers.Conv2D(
filters=1, kernel_size=1, strides=(1, 1), padding='valid',
kernel_initializer=tf.keras.initializers.Constant(
value=conv_weight_scalar))
def preprocess(self, inputs):
true_image_shapes = [] # Doesn't matter for the fake model.
return tf.identity(inputs), true_image_shapes
def predict(self, preprocessed_inputs, true_image_shapes):
return {'image': tf.layers.conv2d(preprocessed_inputs, 3, 1)}
return {'image': self._conv(preprocessed_inputs)}
def postprocess(self, prediction_dict, true_image_shapes):
with tf.control_dependencies(prediction_dict.values()):
@@ -96,7 +103,7 @@ def InMemoryTFRecord(entries):
temp = tempfile.NamedTemporaryFile(delete=False)
filename = temp.name
try:
with tf.python_io.TFRecordWriter(filename) as writer:
with tf.io.TFRecordWriter(filename) as writer:
for value in entries:
writer.write(value)
yield filename
@@ -104,7 +111,7 @@ def InMemoryTFRecord(entries):
os.unlink(temp.name)
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class GenerateEmbeddingData(tf.test.TestCase):
def _save_checkpoint_from_mock_model(self, checkpoint_path):
......@@ -113,64 +120,38 @@ class GenerateEmbeddingData(tf.test.TestCase):
Args:
checkpoint_path: Path to save checkpoint from Fake model.
"""
g = tf.Graph()
with g.as_default():
mock_model = FakeModel(num_classes=5)
preprocessed_inputs, true_image_shapes = mock_model.preprocess(
tf.placeholder(tf.float32, shape=[None, None, None, 3]))
predictions = mock_model.predict(preprocessed_inputs, true_image_shapes)
mock_model.postprocess(predictions, true_image_shapes)
tf.train.get_or_create_global_step()
saver = tf.train.Saver()
init = tf.global_variables_initializer()
with self.test_session(graph=g) as sess:
sess.run(init)
saver.save(sess, checkpoint_path)
mock_model = FakeModel()
fake_image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32)
preprocessed_inputs, true_image_shapes = mock_model.preprocess(fake_image)
predictions = mock_model.predict(preprocessed_inputs, true_image_shapes)
mock_model.postprocess(predictions, true_image_shapes)
ckpt = tf.train.Checkpoint(model=mock_model)
exported_checkpoint_manager = tf.train.CheckpointManager(
ckpt, checkpoint_path, max_to_keep=1)
exported_checkpoint_manager.save(checkpoint_number=0)
def _export_saved_model(self):
tmp_dir = self.get_temp_dir()
checkpoint_path = os.path.join(tmp_dir, 'model.ckpt')
self._save_checkpoint_from_mock_model(checkpoint_path)
self._save_checkpoint_from_mock_model(tmp_dir)
output_directory = os.path.join(tmp_dir, 'output')
saved_model_path = os.path.join(output_directory, 'saved_model')
tf.io.gfile.makedirs(output_directory)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel(num_classes=5)
mock_builder.return_value = FakeModel()
output_directory = os.path.join(tmp_dir, 'output')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.eval_config.use_moving_averages = False
detection_model = model_builder.build(pipeline_config.model,
is_training=False)
outputs, placeholder_tensor = exporter.build_detection_graph(
exporter_lib_v2.export_inference_graph(
input_type='tf_example',
detection_model=detection_model,
input_shape=None,
output_collection_name='inference_op',
graph_hook_fn=None)
output_node_names = ','.join(outputs.keys())
saver = tf.train.Saver()
input_saver_def = saver.as_saver_def()
frozen_graph_def = exporter.freeze_graph_with_def_protos(
input_graph_def=tf.get_default_graph().as_graph_def(),
input_saver_def=input_saver_def,
input_checkpoint=checkpoint_path,
output_node_names=output_node_names,
restore_op_name='save/restore_all',
filename_tensor_name='save/Const:0',
output_graph='',
clear_devices=True,
initializer_nodes='')
exporter.write_saved_model(
saved_model_path=saved_model_path,
frozen_graph_def=frozen_graph_def,
inputs=placeholder_tensor,
outputs=outputs)
return saved_model_path
pipeline_config=pipeline_config,
trained_checkpoint_dir=tmp_dir,
output_directory=output_directory)
saved_model_path = os.path.join(output_directory, 'saved_model')
return saved_model_path
def _create_tf_example(self):
with self.test_session():
encoded_image = tf.image.encode_jpeg(
tf.constant(np.ones((4, 4, 3)).astype(np.uint8))).eval()
encoded_image = tf.io.encode_jpeg(
tf.constant(np.ones((4, 4, 3)).astype(np.uint8))).numpy()
def BytesFeature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
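Both converted tests now export through exporter_lib_v2.export_inference_graph, which writes a standard TF2 SavedModel under output_directory/saved_model instead of freezing a graph. A minimal sketch of inspecting such an export to confirm the serving signature the pipelines call (the path is a placeholder):

import tensorflow as tf

loaded = tf.saved_model.load('/tmp/output/saved_model')  # placeholder path
serving_fn = loaded.signatures['serving_default']

# ConcreteFunctions expose their declared inputs and outputs, which is a quick
# way to check that detection_boxes, detection_scores and num_detections (and
# detection_features for the embedding pipeline) are present.
print(serving_fn.structured_input_signature)
print(serving_fn.structured_outputs)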
@@ -335,7 +316,8 @@ class GenerateEmbeddingData(tf.test.TestCase):
filenames = tf.io.gfile.glob(
output_tfrecord + '-?????-of-?????')
actual_output = []
record_iterator = tf.python_io.tf_record_iterator(path=filenames[0])
record_iterator = tf.data.TFRecordDataset(
tf.convert_to_tensor(filenames)).as_numpy_iterator()
for record in record_iterator:
actual_output.append(record)
self.assertEqual(len(actual_output), 1)
......
@@ -3,7 +3,7 @@ import os
from setuptools import find_packages
from setuptools import setup
REQUIRED_PACKAGES = ['apache-beam', 'pillow', 'lxml', 'matplotlib', 'Cython',
REQUIRED_PACKAGES = ['pillow', 'lxml', 'matplotlib', 'Cython',
'contextlib2', 'tf-slim', 'six', 'pycocotools', 'scipy',
'pandas']
......
......@@ -6,7 +6,9 @@ from setuptools import setup
# Note: adding apache-beam to required packages causes conflict with
# tf-models-offical requirements. These packages request for incompatible
# oauth2client package.
REQUIRED_PACKAGES = ['pillow', 'lxml', 'matplotlib', 'Cython', 'contextlib2',
REQUIRED_PACKAGES = ['avro-python3==1.8.1', 'apache-beam',
'pillow', 'lxml',
'matplotlib', 'Cython', 'contextlib2',
'tf-slim', 'six', 'pycocotools', 'scipy', 'pandas',
'tf-models-official']
......