Commit 855d29db (ModelZoo / ResNet50_tensorflow)

Converting context r-cnn dataset tools to TF2

PiperOrigin-RevId: 323805090

Authored Jul 29, 2020 by Sara Beery; committed by TF Object Detection Team, Jul 29, 2020.
Parent: a565d720
Showing 10 changed files with 120 additions and 191 deletions.
research/object_detection/dataset_tools/context_rcnn/add_context_to_examples.py  +1 -1
research/object_detection/dataset_tools/context_rcnn/add_context_to_examples_tf2_test.py  +13 -13
research/object_detection/dataset_tools/context_rcnn/create_cococameratraps_tfexample_main.py  +3 -13
research/object_detection/dataset_tools/context_rcnn/create_cococameratraps_tfexample_tf2_test.py  +9 -5
research/object_detection/dataset_tools/context_rcnn/generate_detection_data.py  +8 -20
research/object_detection/dataset_tools/context_rcnn/generate_detection_data_tf2_test.py  +38 -54
research/object_detection/dataset_tools/context_rcnn/generate_embedding_data.py  +8 -29
research/object_detection/dataset_tools/context_rcnn/generate_embedding_data_tf2_test.py  +36 -54
research/object_detection/packages/tf1/setup.py  +1 -1
research/object_detection/packages/tf2/setup.py  +3 -1
research/object_detection/dataset_tools/context_rcnn/add_context_to_examples.py

@@ -53,7 +53,7 @@ import os
 import numpy as np
 import PIL.Image
 import six
-import tensorflow.compat.v1 as tf
+import tensorflow as tf

 try:
   import apache_beam as beam  # pylint:disable=g-import-not-at-top
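
Note: the only change in this file is the TensorFlow import, dropping the tf.compat.v1 shim for native TF2. As a minimal illustrative sketch (not code from this commit), the practical consequence is eager execution: ops return concrete values without any Session.

import numpy as np
import tensorflow as tf  # TF2: eager execution is on by default

# Ops execute immediately; no tf.Session is needed to materialize results.
image = tf.constant(np.ones((4, 4, 3), dtype=np.uint8))
jpeg_bytes = tf.io.encode_jpeg(image).numpy()  # plain Python bytes
print(len(jpeg_bytes))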
research/object_detection/dataset_tools/context_rcnn/add_context_to_examples_tf1_test.py → research/object_detection/dataset_tools/context_rcnn/add_context_to_examples_tf2_test.py
@@ -25,11 +25,12 @@ import unittest
 import numpy as np
 import six
-import tensorflow.compat.v1 as tf
+import tensorflow as tf

-from object_detection.dataset_tools.context_rcnn import add_context_to_examples
 from object_detection.utils import tf_version

+if tf_version.is_tf2():
+  from object_detection.dataset_tools.context_rcnn import add_context_to_examples  # pylint:disable=g-import-not-at-top
+
 try:
   import apache_beam as beam  # pylint:disable=g-import-not-at-top
@@ -42,7 +43,7 @@ def InMemoryTFRecord(entries):
   temp = tempfile.NamedTemporaryFile(delete=False)
   filename = temp.name
   try:
-    with tf.python_io.TFRecordWriter(filename) as writer:
+    with tf.io.TFRecordWriter(filename) as writer:
       for value in entries:
         writer.write(value)
     yield filename
@@ -70,13 +71,12 @@ def FloatListFeature(value):
   return tf.train.Feature(float_list=tf.train.FloatList(value=value))


-@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
+@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
 class GenerateContextDataTest(tf.test.TestCase):

   def _create_first_tf_example(self):
-    with self.test_session():
-      encoded_image = tf.image.encode_jpeg(
-          tf.constant(np.ones((4, 4, 3)).astype(np.uint8))).eval()
+    encoded_image = tf.io.encode_jpeg(
+        tf.constant(np.ones((4, 4, 3)).astype(np.uint8))).numpy()

     example = tf.train.Example(features=tf.train.Features(feature={
         'image/encoded': BytesFeature(encoded_image),
@@ -105,9 +105,8 @@ class GenerateContextDataTest(tf.test.TestCase):
     return example.SerializeToString()

   def _create_second_tf_example(self):
-    with self.test_session():
-      encoded_image = tf.image.encode_jpeg(
-          tf.constant(np.ones((4, 4, 3)).astype(np.uint8))).eval()
+    encoded_image = tf.io.encode_jpeg(
+        tf.constant(np.ones((4, 4, 3)).astype(np.uint8))).numpy()

     example = tf.train.Example(features=tf.train.Features(feature={
         'image/encoded': BytesFeature(encoded_image),
@@ -353,7 +352,8 @@ class GenerateContextDataTest(tf.test.TestCase):
...
@@ -353,7 +352,8 @@ class GenerateContextDataTest(tf.test.TestCase):
p
.
run
()
p
.
run
()
filenames
=
tf
.
io
.
gfile
.
glob
(
output_tfrecord
+
'-?????-of-?????'
)
filenames
=
tf
.
io
.
gfile
.
glob
(
output_tfrecord
+
'-?????-of-?????'
)
actual_output
=
[]
actual_output
=
[]
record_iterator
=
tf
.
python_io
.
tf_record_iterator
(
path
=
filenames
[
0
])
record_iterator
=
tf
.
data
.
TFRecordDataset
(
tf
.
convert_to_tensor
(
filenames
)).
as_numpy_iterator
()
for
record
in
record_iterator
:
for
record
in
record_iterator
:
actual_output
.
append
(
record
)
actual_output
.
append
(
record
)
self
.
assertEqual
(
len
(
actual_output
),
2
)
self
.
assertEqual
(
len
(
actual_output
),
2
)
@@ -383,8 +383,8 @@ class GenerateContextDataTest(tf.test.TestCase):
       p.run()
       filenames = tf.io.gfile.glob(output_tfrecord + '-?????-of-?????')
       actual_output = []
-      record_iterator = tf.python_io.tf_record_iterator(
-          path=filenames[0])
+      record_iterator = tf.data.TFRecordDataset(
+          tf.convert_to_tensor(filenames)).as_numpy_iterator()
       for record in record_iterator:
         actual_output.append(record)
       self.assertEqual(len(actual_output), 1)
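
Note: the two hunks above are the recurring read-side migration in this commit: tf.python_io.tf_record_iterator no longer exists in TF2, so shards are read back through tf.data. A self-contained sketch of the same round trip (the path is hypothetical):

import tensorflow as tf

filename = '/tmp/demo.tfrecord'  # hypothetical path
with tf.io.TFRecordWriter(filename) as writer:  # was tf.python_io.TFRecordWriter
  for payload in (b'first', b'second'):
    writer.write(payload)

# TF2 read-back: a tf.data pipeline instead of tf.python_io.tf_record_iterator.
records = list(tf.data.TFRecordDataset([filename]).as_numpy_iterator())
assert records == [b'first', b'second']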
research/object_detection/dataset_tools/context_rcnn/create_cococameratraps_tfexample_main.py
@@ -37,11 +37,10 @@ import argparse
 import hashlib
 import io
 import json
-import logging
 import os
 import numpy as np
 import PIL.Image
-import tensorflow.compat.v1 as tf
+import tensorflow as tf

 from object_detection.utils import dataset_util

 try:
@@ -110,16 +109,9 @@ class ParseImage(beam.DoFn):
         encoded_jpg = fid.read()
       encoded_jpg_io = io.BytesIO(encoded_jpg)
       image = PIL.Image.open(encoded_jpg_io)
-      # Ensure the image can be read by tf
-      with tf.Graph().as_default():
-        image = tf.image.decode_jpeg(encoded_jpg, channels=3)
-        init_op = tf.initialize_all_tables()
-        with tf.Session() as sess:
-          sess.run(init_op)
-          sess.run(image)
-    except Exception as e:  # pylint: disable=broad-except
+      image = tf.io.decode_jpeg(encoded_jpg, channels=3)
+    except Exception:  # pylint: disable=broad-except
       # The image file is missing or corrupt
-      tf.logging.error(str(e))
       return []

     key = hashlib.sha256(encoded_jpg).hexdigest()
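
Note: under TF1 the tool built a throwaway graph and ran a Session just to confirm TensorFlow could decode the JPEG; in eager mode the decode itself either succeeds or raises. A minimal sketch of that validation pattern (illustrative, not this module's code):

import tensorflow as tf

def is_valid_jpeg(encoded_jpg):
  """Returns True if TensorFlow can decode the buffer as a 3-channel JPEG."""
  try:
    # Eager mode: decode_jpeg raises immediately on corrupt input,
    # so no tf.Graph / tf.Session scaffolding is needed.
    tf.io.decode_jpeg(encoded_jpg, channels=3)
    return True
  except tf.errors.InvalidArgumentError:
    return False

print(is_valid_jpeg(b'not a jpeg'))  # False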
@@ -257,8 +249,6 @@ def create_pipeline(pipeline,
     keep_bboxes: Whether to keep any bounding boxes that exist in the json file
   """
-  logging.info('Reading data from COCO-CameraTraps Dataset.')
-
   data = load_json_data(input_annotations_file)
   num_shards = int(np.ceil(float(len(data['images'])) / num_images_per_shard))
research/object_detection/dataset_tools/context_rcnn/create_cococameratraps_tfexample_tf1_test.py → research/object_detection/dataset_tools/context_rcnn/create_cococameratraps_tfexample_tf2_test.py
@@ -25,17 +25,19 @@ import unittest
 import numpy as np
 from PIL import Image
-import tensorflow.compat.v1 as tf
+import tensorflow as tf

-from object_detection.dataset_tools.context_rcnn import create_cococameratraps_tfexample_main
 from object_detection.utils import tf_version

+if tf_version.is_tf2():
+  from object_detection.dataset_tools.context_rcnn import create_cococameratraps_tfexample_main  # pylint:disable=g-import-not-at-top
+
 try:
   import apache_beam as beam  # pylint:disable=g-import-not-at-top
 except ModuleNotFoundError:
   pass


-@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
+@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
 class CreateCOCOCameraTrapsTfexampleTest(tf.test.TestCase):

   IMAGE_HEIGHT = 360
@@ -175,7 +177,8 @@ class CreateCOCOCameraTrapsTfexampleTest(tf.test.TestCase):
       p.run()
       filenames = tf.io.gfile.glob(output_tfrecord + '-?????-of-?????')
       actual_output = []
-      record_iterator = tf.python_io.tf_record_iterator(path=filenames[0])
+      record_iterator = tf.data.TFRecordDataset(
+          tf.convert_to_tensor(filenames)).as_numpy_iterator()
       for record in record_iterator:
         actual_output.append(record)
       self.assertEqual(len(actual_output), num_frames)
@@ -198,7 +201,8 @@ class CreateCOCOCameraTrapsTfexampleTest(tf.test.TestCase):
       p.run()
       filenames = tf.io.gfile.glob(output_tfrecord + '-?????-of-?????')
       actual_output = []
-      record_iterator = tf.python_io.tf_record_iterator(path=filenames[0])
+      record_iterator = tf.data.TFRecordDataset(
+          tf.convert_to_tensor(filenames)).as_numpy_iterator()
       for record in record_iterator:
         actual_output.append(record)
       self.assertEqual(len(actual_output), num_frames)
research/object_detection/dataset_tools/context_rcnn/generate_detection_data.py
@@ -48,7 +48,8 @@ from __future__ import print_function
 import argparse
 import os
 import threading
-import tensorflow.compat.v1 as tf
+import tensorflow as tf
+
 try:
   import apache_beam as beam  # pylint:disable=g-import-not-at-top
 except ModuleNotFoundError:
@@ -85,22 +86,7 @@ class GenerateDetectionDataFn(beam.DoFn):
     # one instance across all threads in the worker. This is possible since
     # tf.Session.run() is thread safe.
     with self.session_lock:
-      if self._session is None:
-        graph = tf.Graph()
-        self._session = tf.Session(graph=graph)
-        with graph.as_default():
-          meta_graph = tf.saved_model.loader.load(
-              self._session, [tf.saved_model.tag_constants.SERVING],
-              self._model_dir)
-        signature = meta_graph.signature_def['serving_default']
-        input_tensor_name = signature.inputs['inputs'].name
-        self._input = graph.get_tensor_by_name(input_tensor_name)
-        self._boxes_node = graph.get_tensor_by_name(
-            signature.outputs['detection_boxes'].name)
-        self._scores_node = graph.get_tensor_by_name(
-            signature.outputs['detection_scores'].name)
-        self._num_detections_node = graph.get_tensor_by_name(
-            signature.outputs['num_detections'].name)
+      self._detect_fn = tf.saved_model.load(self._model_dir)

   def process(self, tfrecord_entry):
     return self._run_inference_and_generate_detections(tfrecord_entry)
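
Note: this hunk is the heart of the conversion: roughly fifteen lines of graph, Session, and tensor-name bookkeeping collapse into one tf.saved_model.load call. A hedged sketch of the TF2 loading pattern (the export path is hypothetical):

import tensorflow as tf

# tf.saved_model.load returns a trackable object; its serving signature is a
# plain callable, so no Session, tags, or get_tensor_by_name lookups.
detect_fn = tf.saved_model.load('/tmp/exported/saved_model')
serving_fn = detect_fn.signatures['serving_default']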
@@ -112,9 +98,11 @@ class GenerateDetectionDataFn(beam.DoFn):
       # There are already ground truth boxes for this image, just keep them.
       return [input_example]

-    detection_boxes, detection_scores, num_detections = self._session.run(
-        [self._boxes_node, self._scores_node, self._num_detections_node],
-        feed_dict={self._input: [tfrecord_entry]})
+    detections = self._detect_fn.signatures['serving_default'](
+        (tf.expand_dims(tf.convert_to_tensor(tfrecord_entry), 0)))
+    detection_boxes = detections['detection_boxes']
+    num_detections = detections['num_detections']
+    detection_scores = detections['detection_scores']

     example = tf.train.Example()
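
Note: where TF1 fetched named output tensors through Session.run with a feed_dict, the TF2 signature is called like a function and returns a dict of eager tensors keyed by output name. A sketch continuing the hypothetical detect_fn above (the serialized example is a placeholder):

# Batch one serialized tf.train.Example and invoke the serving signature.
serialized_example = b'...'  # placeholder for a real serialized example
batched = tf.expand_dims(tf.convert_to_tensor(serialized_example), 0)
detections = detect_fn.signatures['serving_default'](batched)

# Outputs are eager tensors; .numpy() yields ordinary arrays and scalars.
boxes = detections['detection_boxes'].numpy()
scores = detections['detection_scores'].numpy()
num = int(detections['num_detections'].numpy()[0])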
research/object_detection/dataset_tools/context_rcnn/generate_detection_data_tf1_test.py → research/object_detection/dataset_tools/context_rcnn/generate_detection_data_tf2_test.py
@@ -24,15 +24,17 @@ import tempfile
 import unittest
 import numpy as np
 import six
-import tensorflow.compat.v1 as tf
+import tensorflow as tf

-from object_detection import exporter
+from object_detection import exporter_lib_v2
 from object_detection.builders import model_builder
 from object_detection.core import model
-from object_detection.dataset_tools.context_rcnn import generate_detection_data
 from object_detection.protos import pipeline_pb2
 from object_detection.utils import tf_version

+if tf_version.is_tf2():
+  from object_detection.dataset_tools.context_rcnn import generate_detection_data  # pylint:disable=g-import-not-at-top
+
 if six.PY2:
   import mock  # pylint: disable=g-import-not-at-top
 else:
@@ -45,17 +47,23 @@ except ModuleNotFoundError:
 class FakeModel(model.DetectionModel):
+  """A Fake Detection model with expected output nodes from post-processing."""
+
+  def __init__(self, conv_weight_scalar=1.0):
+    super(FakeModel, self).__init__(num_classes=5)
+    self._conv = tf.keras.layers.Conv2D(
+        filters=1, kernel_size=1, strides=(1, 1), padding='valid',
+        kernel_initializer=tf.keras.initializers.Constant(
+            value=conv_weight_scalar))

   def preprocess(self, inputs):
     true_image_shapes = []  # Doesn't matter for the fake model.
     return tf.identity(inputs), true_image_shapes

   def predict(self, preprocessed_inputs, true_image_shapes):
-    return {'image': tf.layers.conv2d(preprocessed_inputs, 3, 1)}
+    return {'image': self._conv(preprocessed_inputs)}

   def postprocess(self, prediction_dict, true_image_shapes):
-    with tf.control_dependencies(prediction_dict.values()):
+    with tf.control_dependencies(list(prediction_dict.values())):
       postprocessed_tensors = {
           'detection_boxes': tf.constant([[[0.0, 0.1, 0.5, 0.6],
                                            [0.5, 0.5, 0.8, 0.8]]], tf.float32),
@@ -89,7 +97,7 @@ def InMemoryTFRecord(entries):
...
@@ -89,7 +97,7 @@ def InMemoryTFRecord(entries):
temp
=
tempfile
.
NamedTemporaryFile
(
delete
=
False
)
temp
=
tempfile
.
NamedTemporaryFile
(
delete
=
False
)
filename
=
temp
.
name
filename
=
temp
.
name
try
:
try
:
with
tf
.
python_
io
.
TFRecordWriter
(
filename
)
as
writer
:
with
tf
.
io
.
TFRecordWriter
(
filename
)
as
writer
:
for
value
in
entries
:
for
value
in
entries
:
writer
.
write
(
value
)
writer
.
write
(
value
)
yield
filename
yield
filename
@@ -97,7 +105,7 @@ def InMemoryTFRecord(entries):
     os.unlink(filename)


-@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
+@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
 class GenerateDetectionDataTest(tf.test.TestCase):

   def _save_checkpoint_from_mock_model(self, checkpoint_path):
@@ -106,64 +114,39 @@ class GenerateDetectionDataTest(tf.test.TestCase):
     Args:
       checkpoint_path: Path to save checkpoint from Fake model.
     """
-    g = tf.Graph()
-    with g.as_default():
-      mock_model = FakeModel(num_classes=5)
-      preprocessed_inputs, true_image_shapes = mock_model.preprocess(
-          tf.placeholder(tf.float32, shape=[None, None, None, 3]))
-      predictions = mock_model.predict(preprocessed_inputs, true_image_shapes)
-      mock_model.postprocess(predictions, true_image_shapes)
-      tf.train.get_or_create_global_step()
-      saver = tf.train.Saver()
-      init = tf.global_variables_initializer()
-      with self.test_session(graph=g) as sess:
-        sess.run(init)
-        saver.save(sess, checkpoint_path)
+    mock_model = FakeModel()
+    fake_image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32)
+    preprocessed_inputs, true_image_shapes = mock_model.preprocess(fake_image)
+    predictions = mock_model.predict(preprocessed_inputs, true_image_shapes)
+    mock_model.postprocess(predictions, true_image_shapes)
+    ckpt = tf.train.Checkpoint(model=mock_model)
+    exported_checkpoint_manager = tf.train.CheckpointManager(
+        ckpt, checkpoint_path, max_to_keep=1)
+    exported_checkpoint_manager.save(checkpoint_number=0)

   def _export_saved_model(self):
     tmp_dir = self.get_temp_dir()
-    checkpoint_path = os.path.join(tmp_dir, 'model.ckpt')
-    self._save_checkpoint_from_mock_model(checkpoint_path)
+    self._save_checkpoint_from_mock_model(tmp_dir)
     output_directory = os.path.join(tmp_dir, 'output')
     saved_model_path = os.path.join(output_directory, 'saved_model')
     tf.io.gfile.makedirs(output_directory)
     with mock.patch.object(
         model_builder, 'build', autospec=True) as mock_builder:
-      mock_builder.return_value = FakeModel(num_classes=5)
+      mock_builder.return_value = FakeModel()
+      output_directory = os.path.join(tmp_dir, 'output')
       pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
-      pipeline_config.eval_config.use_moving_averages = False
-      detection_model = model_builder.build(pipeline_config.model,
-                                            is_training=False)
-      outputs, placeholder_tensor = exporter.build_detection_graph(
-          input_type='tf_example',
-          detection_model=detection_model,
-          input_shape=None,
-          output_collection_name='inference_op',
-          graph_hook_fn=None)
-      output_node_names = ','.join(outputs.keys())
-      saver = tf.train.Saver()
-      input_saver_def = saver.as_saver_def()
-      frozen_graph_def = exporter.freeze_graph_with_def_protos(
-          input_graph_def=tf.get_default_graph().as_graph_def(),
-          input_saver_def=input_saver_def,
-          input_checkpoint=checkpoint_path,
-          output_node_names=output_node_names,
-          restore_op_name='save/restore_all',
-          filename_tensor_name='save/Const:0',
-          output_graph='',
-          clear_devices=True,
-          initializer_nodes='')
-      exporter.write_saved_model(
-          saved_model_path=saved_model_path,
-          frozen_graph_def=frozen_graph_def,
-          inputs=placeholder_tensor,
-          outputs=outputs)
+      exporter_lib_v2.export_inference_graph(
+          input_type='tf_example',
+          pipeline_config=pipeline_config,
+          trained_checkpoint_dir=tmp_dir,
+          output_directory=output_directory)
+      saved_model_path = os.path.join(output_directory, 'saved_model')
       return saved_model_path

   def _create_tf_example(self):
     with self.test_session():
-      encoded_image = tf.image.encode_jpeg(
-          tf.constant(np.ones((4, 6, 3)).astype(np.uint8))).eval()
+      encoded_image = tf.io.encode_jpeg(
+          tf.constant(np.ones((4, 6, 3)).astype(np.uint8))).numpy()

     def BytesFeature(value):
       return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
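
Note: checkpointing flips from the name-based tf.train.Saver to object-based tf.train.Checkpoint plus CheckpointManager, and exporting becomes a single exporter_lib_v2.export_inference_graph call driven by a checkpoint directory instead of a frozen GraphDef. A standalone sketch of the new checkpoint flow (paths illustrative):

import tensorflow as tf

# Object-based checkpointing: Checkpoint tracks variables reachable from the
# objects it is handed; no graph, Saver, or variable names involved.
layer = tf.keras.layers.Dense(1)
layer(tf.zeros([1, 3]))                        # build the layer's variables
ckpt = tf.train.Checkpoint(model=layer)
manager = tf.train.CheckpointManager(ckpt, '/tmp/ckpt_demo', max_to_keep=1)
save_path = manager.save(checkpoint_number=0)  # e.g. /tmp/ckpt_demo/ckpt-0

ckpt.restore(save_path)                        # symmetric, object-based restore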
@@ -264,7 +247,8 @@ class GenerateDetectionDataTest(tf.test.TestCase):
       p.run()
       filenames = tf.io.gfile.glob(output_tfrecord + '-?????-of-?????')
       actual_output = []
-      record_iterator = tf.python_io.tf_record_iterator(path=filenames[0])
+      record_iterator = tf.data.TFRecordDataset(
+          tf.convert_to_tensor(filenames)).as_numpy_iterator()
       for record in record_iterator:
         actual_output.append(record)
       self.assertEqual(len(actual_output), 1)
research/object_detection/dataset_tools/context_rcnn/generate_embedding_data.py
@@ -55,7 +55,7 @@ import threading
 import numpy as np
 import six
-import tensorflow.compat.v1 as tf
+import tensorflow as tf

 try:
   import apache_beam as beam  # pylint:disable=g-import-not-at-top
@@ -95,27 +95,7 @@ class GenerateEmbeddingDataFn(beam.DoFn):
     # one instance across all threads in the worker. This is possible since
     # tf.Session.run() is thread safe.
     with self.session_lock:
-      if self._session is None:
-        graph = tf.Graph()
-        self._session = tf.Session(graph=graph)
-        with graph.as_default():
-          meta_graph = tf.saved_model.loader.load(
-              self._session, [tf.saved_model.tag_constants.SERVING],
-              self._model_dir)
-        signature = meta_graph.signature_def['serving_default']
-        input_tensor_name = signature.inputs['inputs'].name
-        detection_features_name = signature.outputs['detection_features'].name
-        detection_boxes_name = signature.outputs['detection_boxes'].name
-        num_detections_name = signature.outputs['num_detections'].name
-        self._input = graph.get_tensor_by_name(input_tensor_name)
-        self._embedding_node = graph.get_tensor_by_name(detection_features_name)
-        self._box_node = graph.get_tensor_by_name(detection_boxes_name)
-        self._scores_node = graph.get_tensor_by_name(
-            signature.outputs['detection_scores'].name)
-        self._num_detections = graph.get_tensor_by_name(num_detections_name)
-        tf.logging.info(signature.outputs['detection_features'].name)
-        tf.logging.info(signature.outputs['detection_boxes'].name)
-        tf.logging.info(signature.outputs['num_detections'].name)
+      self._detect_fn = tf.saved_model.load(self._model_dir)

   def process(self, tfrecord_entry):
     return self._run_inference_and_generate_embedding(tfrecord_entry)
@@ -184,13 +164,12 @@ class GenerateEmbeddingDataFn(beam.DoFn):
     example.features.feature['image/unix_time'].float_list.value.extend(
         [unix_time])
-    (detection_features, detection_boxes, num_detections,
-     detection_scores) = self._session.run(
-         [
-             self._embedding_node, self._box_node, self._num_detections[0],
-             self._scores_node
-         ],
-         feed_dict={self._input: [tfrecord_entry]})
+    detections = self._detect_fn.signatures['serving_default'](
+        (tf.expand_dims(tf.convert_to_tensor(tfrecord_entry), 0)))
+    detection_features = detections['detection_features']
+    detection_boxes = detections['detection_boxes']
+    num_detections = detections['num_detections']
+    detection_scores = detections['detection_scores']
     num_detections = int(num_detections)
     embed_all = []
research/object_detection/dataset_tools/context_rcnn/generate_embedding_data_tf1_test.py → research/object_detection/dataset_tools/context_rcnn/generate_embedding_data_tf2_test.py
@@ -23,14 +23,15 @@ import tempfile
 import unittest
 import numpy as np
 import six
-import tensorflow.compat.v1 as tf
+import tensorflow as tf

-from object_detection import exporter
+from object_detection import exporter_lib_v2
 from object_detection.builders import model_builder
 from object_detection.core import model
-from object_detection.dataset_tools.context_rcnn import generate_embedding_data
 from object_detection.protos import pipeline_pb2
 from object_detection.utils import tf_version

+if tf_version.is_tf2():
+  from object_detection.dataset_tools.context_rcnn import generate_embedding_data  # pylint:disable=g-import-not-at-top
+
 if six.PY2:
   import mock  # pylint: disable=g-import-not-at-top
@@ -44,14 +45,20 @@ except ModuleNotFoundError:
 class FakeModel(model.DetectionModel):
+  """A Fake Detection model with expected output nodes from post-processing."""
+
+  def __init__(self, conv_weight_scalar=1.0):
+    super(FakeModel, self).__init__(num_classes=5)
+    self._conv = tf.keras.layers.Conv2D(
+        filters=1, kernel_size=1, strides=(1, 1), padding='valid',
+        kernel_initializer=tf.keras.initializers.Constant(
+            value=conv_weight_scalar))

   def preprocess(self, inputs):
     true_image_shapes = []  # Doesn't matter for the fake model.
     return tf.identity(inputs), true_image_shapes

   def predict(self, preprocessed_inputs, true_image_shapes):
-    return {'image': tf.layers.conv2d(preprocessed_inputs, 3, 1)}
+    return {'image': self._conv(preprocessed_inputs)}

   def postprocess(self, prediction_dict, true_image_shapes):
     with tf.control_dependencies(prediction_dict.values()):
@@ -96,7 +103,7 @@ def InMemoryTFRecord(entries):
   temp = tempfile.NamedTemporaryFile(delete=False)
   filename = temp.name
   try:
-    with tf.python_io.TFRecordWriter(filename) as writer:
+    with tf.io.TFRecordWriter(filename) as writer:
       for value in entries:
         writer.write(value)
     yield filename
@@ -104,7 +111,7 @@ def InMemoryTFRecord(entries):
     os.unlink(temp.name)


-@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
+@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
 class GenerateEmbeddingData(tf.test.TestCase):

   def _save_checkpoint_from_mock_model(self, checkpoint_path):
@@ -113,64 +120,38 @@ class GenerateEmbeddingData(tf.test.TestCase):
     Args:
       checkpoint_path: Path to save checkpoint from Fake model.
     """
-    g = tf.Graph()
-    with g.as_default():
-      mock_model = FakeModel(num_classes=5)
-      preprocessed_inputs, true_image_shapes = mock_model.preprocess(
-          tf.placeholder(tf.float32, shape=[None, None, None, 3]))
-      predictions = mock_model.predict(preprocessed_inputs, true_image_shapes)
-      mock_model.postprocess(predictions, true_image_shapes)
-      tf.train.get_or_create_global_step()
-      saver = tf.train.Saver()
-      init = tf.global_variables_initializer()
-      with self.test_session(graph=g) as sess:
-        sess.run(init)
-        saver.save(sess, checkpoint_path)
+    mock_model = FakeModel()
+    fake_image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32)
+    preprocessed_inputs, true_image_shapes = mock_model.preprocess(fake_image)
+    predictions = mock_model.predict(preprocessed_inputs, true_image_shapes)
+    mock_model.postprocess(predictions, true_image_shapes)
+    ckpt = tf.train.Checkpoint(model=mock_model)
+    exported_checkpoint_manager = tf.train.CheckpointManager(
+        ckpt, checkpoint_path, max_to_keep=1)
+    exported_checkpoint_manager.save(checkpoint_number=0)

   def _export_saved_model(self):
     tmp_dir = self.get_temp_dir()
-    checkpoint_path = os.path.join(tmp_dir, 'model.ckpt')
-    self._save_checkpoint_from_mock_model(checkpoint_path)
+    self._save_checkpoint_from_mock_model(tmp_dir)
     output_directory = os.path.join(tmp_dir, 'output')
     saved_model_path = os.path.join(output_directory, 'saved_model')
     tf.io.gfile.makedirs(output_directory)
     with mock.patch.object(
         model_builder, 'build', autospec=True) as mock_builder:
-      mock_builder.return_value = FakeModel(num_classes=5)
+      mock_builder.return_value = FakeModel()
+      output_directory = os.path.join(tmp_dir, 'output')
       pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
-      pipeline_config.eval_config.use_moving_averages = False
-      detection_model = model_builder.build(pipeline_config.model,
-                                            is_training=False)
-      outputs, placeholder_tensor = exporter.build_detection_graph(
-          input_type='tf_example',
-          detection_model=detection_model,
-          input_shape=None,
-          output_collection_name='inference_op',
-          graph_hook_fn=None)
-      output_node_names = ','.join(outputs.keys())
-      saver = tf.train.Saver()
-      input_saver_def = saver.as_saver_def()
-      frozen_graph_def = exporter.freeze_graph_with_def_protos(
-          input_graph_def=tf.get_default_graph().as_graph_def(),
-          input_saver_def=input_saver_def,
-          input_checkpoint=checkpoint_path,
-          output_node_names=output_node_names,
-          restore_op_name='save/restore_all',
-          filename_tensor_name='save/Const:0',
-          output_graph='',
-          clear_devices=True,
-          initializer_nodes='')
-      exporter.write_saved_model(
-          saved_model_path=saved_model_path,
-          frozen_graph_def=frozen_graph_def,
-          inputs=placeholder_tensor,
-          outputs=outputs)
+      exporter_lib_v2.export_inference_graph(
+          input_type='tf_example',
+          pipeline_config=pipeline_config,
+          trained_checkpoint_dir=tmp_dir,
+          output_directory=output_directory)
+      saved_model_path = os.path.join(output_directory, 'saved_model')
       return saved_model_path

   def _create_tf_example(self):
-    with self.test_session():
-      encoded_image = tf.image.encode_jpeg(
-          tf.constant(np.ones((4, 4, 3)).astype(np.uint8))).eval()
+    encoded_image = tf.io.encode_jpeg(
+        tf.constant(np.ones((4, 4, 3)).astype(np.uint8))).numpy()

     def BytesFeature(value):
       return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
@@ -335,7 +316,8 @@ class GenerateEmbeddingData(tf.test.TestCase):
       filenames = tf.io.gfile.glob(
           output_tfrecord + '-?????-of-?????')
       actual_output = []
-      record_iterator = tf.python_io.tf_record_iterator(path=filenames[0])
+      record_iterator = tf.data.TFRecordDataset(
+          tf.convert_to_tensor(filenames)).as_numpy_iterator()
       for record in record_iterator:
         actual_output.append(record)
       self.assertEqual(len(actual_output), 1)
research/object_detection/packages/tf1/setup.py
@@ -3,7 +3,7 @@ import os
 from setuptools import find_packages
 from setuptools import setup

-REQUIRED_PACKAGES = ['apache-beam', 'pillow', 'lxml', 'matplotlib', 'Cython',
+REQUIRED_PACKAGES = ['pillow', 'lxml', 'matplotlib', 'Cython',
                      'contextlib2', 'tf-slim', 'six', 'pycocotools', 'scipy',
                      'pandas']
research/object_detection/packages/tf2/setup.py
@@ -6,7 +6,9 @@ from setuptools import setup
 # Note: adding apache-beam to required packages causes conflict with
 # tf-models-offical requirements. These packages request for incompatible
 # oauth2client package.
-REQUIRED_PACKAGES = ['pillow', 'lxml', 'matplotlib', 'Cython', 'contextlib2',
+REQUIRED_PACKAGES = ['avro-python3==1.8.1', 'apache-beam', 'pillow', 'lxml',
+                     'matplotlib', 'Cython', 'contextlib2',
                      'tf-slim', 'six', 'pycocotools', 'scipy', 'pandas',
                      'tf-models-official']
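
Note: with the Beam-based tools now targeting TF2, apache-beam moves from the TF1 package requirements to the TF2 ones, pinned alongside avro-python3 1.8.1 (presumably to sidestep the dependency conflict the comment above describes). The modules themselves still import Beam defensively, as seen throughout this commit; a sketch of that guard:

# Guarded import: apache-beam is declared for the TF2 package, but the
# dataset tools still degrade gracefully when it is absent.
try:
  import apache_beam as beam  # pylint:disable=g-import-not-at-top
except ModuleNotFoundError:
  beam = None  # callers must check for Beam before building a pipeline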