Unverified commit d5e826e3 authored by Steven Hickson, committed by GitHub

Merge branch 'master' into master

parents e1ac09e1 fc37f117
@@ -22,6 +22,7 @@ import abc
 import numpy as np
+from six.moves import xrange
 import tensorflow as tf
 from tensorflow.python.ops import nn
 from tensorflow.python.ops import tensor_array_ops as ta
...
@@ -15,6 +15,7 @@
 """Utils for building DRAGNN specs."""
+from six.moves import xrange
 import tensorflow as tf
 from dragnn.protos import spec_pb2
...
@@ -23,6 +23,7 @@ import random
 import tensorflow as tf
+from six.moves import xrange
 from tensorflow.core.framework.summary_pb2 import Summary
 from tensorflow.python.framework import errors
 from tensorflow.python.platform import gfile
...
@@ -88,12 +88,12 @@ def main(unused_argv):
     sentence.ParseFromString(d)
     tr = asciitree.LeftAligned()
     d = to_dict(sentence)
-    print 'Input: %s' % sentence.text
-    print 'Parse:'
+    print('Input: %s' % sentence.text)
+    print('Parse:')
     tr_str = tr(d)
     pat = re.compile(r'\s*@\d+$')
     for tr_ln in tr_str.splitlines():
-      print pat.sub('', tr_ln)
+      print(pat.sub('', tr_ln))
     if finished:
       break
...
@@ -140,7 +140,7 @@ class LexiconBuilderTest(test_util.TensorFlowTestCase):
     self.assertTrue(last)

   def ValidateTagToCategoryMap(self):
-    with file(os.path.join(FLAGS.test_tmpdir, 'tag-to-category'), 'r') as f:
+    with open(os.path.join(FLAGS.test_tmpdir, 'tag-to-category'), 'r') as f:
       entries = [line.strip().split('\t') for line in f.readlines()]
     for tag, category in entries:
       self.assertIn(tag, TAGS)
@@ -148,7 +148,7 @@ class LexiconBuilderTest(test_util.TensorFlowTestCase):
   def LoadMap(self, map_name):
     loaded_map = {}
-    with file(os.path.join(FLAGS.test_tmpdir, map_name), 'r') as f:
+    with open(os.path.join(FLAGS.test_tmpdir, map_name), 'r') as f:
       for line in f:
         entries = line.strip().split(' ')
         if len(entries) >= 2:
...
@@ -76,9 +76,9 @@ def compute_average_alignment(
       alignment = np.mean(
           np.abs(np.array(times_i)-np.array(times_j))/float(seq_len))
       all_alignments.append(alignment)
-      print 'alignment so far %f' % alignment
+      print('alignment so far %f' % alignment)
   average_alignment = np.mean(all_alignments)
-  print 'Average alignment %f' % average_alignment
+  print('Average alignment %f' % average_alignment)
   summ = tf.Summary(value=[tf.Summary.Value(
       tag='validation/alignment', simple_value=average_alignment)])
   summary_writer.add_summary(summ, int(training_step))
...
@@ -58,6 +58,7 @@ matplotlib.use('TkAgg')
 from matplotlib import animation  # pylint: disable=g-import-not-at-top
 import matplotlib.pyplot as plt
 import numpy as np
+from six.moves import input
 import tensorflow as tf

 tf.logging.set_verbosity(tf.logging.INFO)
@@ -438,7 +439,7 @@ def main(_):
     tf.logging.info('About to write to:')
     for v in view_dirs:
       tf.logging.info(v)
-    raw_input('Press Enter to continue...')
+    input('Press Enter to continue...')
   except SyntaxError:
     pass
...
@@ -22,6 +22,7 @@ from collections import defaultdict
 import os
 import numpy as np
 from sklearn.metrics.pairwise import pairwise_distances
+from six.moves import xrange
 import data_providers
 from estimators.get_estimator import get_estimator
 from utils import util
...
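All of the hunks above apply the same Python 2/3 compatibility recipe: import `xrange` and `input` from `six.moves`, call `print()` as a function, and replace the Python 2-only `file()` builtin with `open()`. A minimal, self-contained sketch of that recipe (illustrative only, not taken from any of the changed files):

```
# Runs unchanged on Python 2 and Python 3.
from __future__ import print_function

from six.moves import input   # raw_input() on Python 2, input() on Python 3
from six.moves import xrange  # xrange() on Python 2, range() on Python 3


def count_lines(path):
  # open() replaces the Python 2-only file() builtin.
  with open(path, 'r') as f:
    return len(f.readlines())


for i in xrange(3):
  print('iteration %d' % i)
```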
# TensorFlow for Java: Examples
These examples include using pre-trained models for [image
classification](label_image) and [object detection](object_detection),
and driving the [training](training) of a pre-defined model - all using the
TensorFlow Java API.
The TensorFlow Java API does not have feature parity with the Python API.
The Java API is most suitable for inference using pre-trained models
and for training pre-defined models from a single Java process.
Python will be the most convenient language for defining the
numerical computation of a model.
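As a rough illustration of that split (a sketch only, not part of these examples), a graph can be defined and serialized from Python and then loaded by the Java API via `Graph.importGraphDef`, as `LabelImage.java` does below; the tensor and file names here are placeholders:

```
# Sketch: define a trivial TF 1.x graph in Python and write it out as a
# serialized GraphDef for the Java API to load. 'x', 'y' and 'my_graph.pb'
# are illustrative names, not used by the examples in this directory.
import tensorflow as tf

with tf.Graph().as_default() as g:
  x = tf.placeholder(tf.float32, shape=(None,), name='x')
  y = tf.identity(x * 2.0 + 1.0, name='y')

with open('my_graph.pb', 'wb') as f:
  f.write(g.as_graph_def().SerializeToString())
# On the Java side: graph.importGraphDef(bytes), then feed 'x' and fetch 'y'.
```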
- [Slides](https://docs.google.com/presentation/d/e/2PACX-1vQ6DzxNTBrJo7K5P8t5_rBRGnyJoPUPBVOJR4ooHCwi4TlBFnIriFmI719rDNpcQzojqsV58aUqmBBx/pub?start=false&loop=false&delayms=3000) from January 2018.
- See README.md in each subdirectory for details.
FROM tensorflow/tensorflow:1.4.0
WORKDIR /
RUN apt-get update
RUN apt-get -y install maven openjdk-8-jdk
RUN mvn dependency:get -Dartifact=org.tensorflow:tensorflow:1.4.0
RUN mvn dependency:get -Dartifact=org.tensorflow:proto:1.4.0
CMD ["/bin/bash", "-l"]
Dockerfile for building an image suitable for running the Java examples.
Typical usage:
```
docker build -t java-tensorflow .
docker run -it --rm -v ${PWD}/..:/examples -w /examples java-tensorflow
```
That second command drops you into a shell that has all
the dependencies required to run the scripts and Java
examples.
The script `sanity_test.sh` builds this container and runs a compilation
check on all the maven projects.
#!/bin/bash
#
# Silly sanity test: build the image and compile all the example projects inside it.
DIR="$(cd "$(dirname "$0")" && pwd -P)"
docker build -t java-tensorflow "${DIR}"
docker run -it --rm -v "${DIR}/..":/examples java-tensorflow bash /examples/docker/test_inside_container.sh
#!/bin/bash
set -ex
cd /examples/label_image
mvn compile
cd /examples/object_detection
mvn compile
cd /examples/training
mvn compile
images
src/main/resources
target
# Image Classification Example
1. Download the model:
   - If you have [TensorFlow 1.4+ for Python installed](https://www.tensorflow.org/install/),
     run `python ./download.py`.
   - If not, but you have [docker](https://www.docker.com/get-docker) installed,
     run `download.sh`.

2. Compile [`LabelImage.java`](src/main/java/LabelImage.java):

   ```
   mvn compile
   ```

3. Download some sample images:

   If you already have some images, great. Otherwise, run
   `download_sample_images.sh` to fetch a few.

4. Classify!

   ```
   mvn -q exec:java -Dexec.args="<path to image file>"
   ```
"""Create an image classification graph.
Script to download a pre-trained image classifier and tweak it so that
the model accepts raw bytes of an encoded image.
Doing so involves some model-specific normalization of an image.
Ideally, this would have been part of the image classifier model,
but the particular model being used didn't include this normalization,
so this script does the necessary tweaking.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import urllib
import os
import zipfile
import tensorflow as tf
URL = 'https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip'
LABELS_FILE = 'imagenet_comp_graph_label_strings.txt'
GRAPH_FILE = 'tensorflow_inception_graph.pb'
GRAPH_INPUT_TENSOR = 'input:0'
GRAPH_PROBABILITIES_TENSOR = 'output:0'
IMAGE_HEIGHT = 224
IMAGE_WIDTH = 224
MEAN = 117
SCALE = 1
LOCAL_DIR = 'src/main/resources'
def download():
  print('Downloading %s' % URL)
  zip_filename, _ = urllib.request.urlretrieve(URL)
  with zipfile.ZipFile(zip_filename) as zip:
    zip.extract(LABELS_FILE)
    zip.extract(GRAPH_FILE)
  os.rename(LABELS_FILE, os.path.join(LOCAL_DIR, 'labels.txt'))
  os.rename(GRAPH_FILE, os.path.join(LOCAL_DIR, 'graph.pb'))

def create_graph_to_decode_and_normalize_image():
  """See file docstring.

  Returns:
    input: The placeholder to feed the raw bytes of an encoded image.
    y: A Tensor (the decoded, normalized image) to be fed to the graph.
  """
  image = tf.placeholder(tf.string, shape=(), name='encoded_image_bytes')
  with tf.name_scope("preprocess"):
    y = tf.image.decode_image(image, channels=3)
    y = tf.cast(y, tf.float32)
    y = tf.expand_dims(y, axis=0)
    y = tf.image.resize_bilinear(y, (IMAGE_HEIGHT, IMAGE_WIDTH))
    y = (y - MEAN) / SCALE
  return (image, y)

def patch_graph():
  """Create graph.pb that applies the model in URL to raw image bytes."""
  with tf.Graph().as_default() as g:
    input_image, image_normalized = create_graph_to_decode_and_normalize_image()
    original_graph_def = tf.GraphDef()
    # Read the downloaded GraphDef in binary mode so ParseFromString gets bytes.
    with open(os.path.join(LOCAL_DIR, 'graph.pb'), 'rb') as f:
      original_graph_def.ParseFromString(f.read())
    softmax = tf.import_graph_def(
        original_graph_def,
        name='inception',
        input_map={GRAPH_INPUT_TENSOR: image_normalized},
        return_elements=[GRAPH_PROBABILITIES_TENSOR])
    # We're constructing a graph that accepts a single image (as opposed to a
    # batch of images), so might as well make the output be a vector of
    # probabilities, instead of a batch of vectors with batch size 1.
    output_probabilities = tf.squeeze(softmax, name='probabilities')
    # Overwrite the graph (again in binary mode, since GraphDef is a binary proto).
    with open(os.path.join(LOCAL_DIR, 'graph.pb'), 'wb') as f:
      f.write(g.as_graph_def().SerializeToString())
  print('------------------------------------------------------------')
  print('MODEL GRAPH : graph.pb')
  print('LABELS : labels.txt')
  print('INPUT TENSOR : %s' % input_image.op.name)
  print('OUTPUT TENSOR: %s' % output_probabilities.op.name)


if __name__ == '__main__':
  if not os.path.exists(LOCAL_DIR):
    os.makedirs(LOCAL_DIR)
  download()
  patch_graph()
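As an optional sanity check (a sketch, not part of the example), the patched `graph.pb` can be exercised from Python by feeding raw image bytes to the same tensors that `LabelImage.java` uses, `encoded_image_bytes` and `probabilities`. The image path below assumes one of the files fetched by `download_sample_images.sh`:

```
# Sketch: load src/main/resources/graph.pb and classify one encoded image.
import tensorflow as tf

graph_def = tf.GraphDef()
with open('src/main/resources/graph.pb', 'rb') as f:
  graph_def.ParseFromString(f.read())

with tf.Graph().as_default():
  tf.import_graph_def(graph_def, name='')
  with tf.Session() as sess:
    with open('images/porcupine.jpg', 'rb') as f:  # any encoded JPEG/PNG works
      image_bytes = f.read()
    probs = sess.run('probabilities:0',
                     feed_dict={'encoded_image_bytes:0': image_bytes})
    print('Most likely class index: %d' % probs.argmax())
```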
#!/bin/bash
DIR="$(cd "$(dirname "$0")" && pwd -P)"
docker run -it -v ${DIR}:/x -w /x --rm tensorflow/tensorflow:1.4.0 python download.py
#!/bin/bash
DIR=$(dirname $0)
mkdir -p ${DIR}/images
cd ${DIR}/images
# Some random images
curl -o "porcupine.jpg" -L "https://cdn.pixabay.com/photo/2014/11/06/12/46/porcupines-519145_960_720.jpg"
curl -o "whale.jpg" -L "https://static.pexels.com/photos/417196/pexels-photo-417196.jpeg"
curl -o "terrier1u.jpg" -L "https://upload.wikimedia.org/wikipedia/commons/3/34/Australian_Terrier_Melly_%282%29.JPG"
curl -o "terrier2.jpg" -L "https://cdn.pixabay.com/photo/2014/05/13/07/44/yorkshire-terrier-343198_960_720.jpg"
<project>
  <modelVersion>4.0.0</modelVersion>
  <groupId>org.myorg</groupId>
  <artifactId>label-image</artifactId>
  <version>1.0-SNAPSHOT</version>
  <properties>
    <exec.mainClass>LabelImage</exec.mainClass>
    <!-- The sample code requires at least JDK 1.7. -->
    <!-- The maven compiler plugin defaults to a lower version. -->
    <maven.compiler.source>1.7</maven.compiler.source>
    <maven.compiler.target>1.7</maven.compiler.target>
  </properties>
  <dependencies>
    <dependency>
      <groupId>org.tensorflow</groupId>
      <artifactId>tensorflow</artifactId>
      <version>1.4.0</version>
    </dependency>
    <!-- For ByteStreams.toByteArray: https://google.github.io/guava/releases/23.0/api/docs/com/google/common/io/ByteStreams.html -->
    <dependency>
      <groupId>com.google.guava</groupId>
      <artifactId>guava</artifactId>
      <version>23.6-jre</version>
    </dependency>
  </dependencies>
</project>
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
import com.google.common.io.ByteStreams;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;
import org.tensorflow.Graph;
import org.tensorflow.Session;
import org.tensorflow.Tensor;
import org.tensorflow.Tensors;
/**
* Simplified version of
* https://github.com/tensorflow/tensorflow/blob/r1.4/tensorflow/java/src/main/java/org/tensorflow/examples/LabelImage.java
*/
public class LabelImage {
  public static void main(String[] args) throws Exception {
    if (args.length < 1) {
      System.err.println("USAGE: Provide a list of image filenames");
      System.exit(1);
    }
    final List<String> labels = loadLabels();
    try (Graph graph = new Graph();
        Session session = new Session(graph)) {
      graph.importGraphDef(loadGraphDef());

      float[] probabilities = null;
      for (String filename : args) {
        byte[] bytes = Files.readAllBytes(Paths.get(filename));
        try (Tensor<String> input = Tensors.create(bytes);
            Tensor<Float> output =
                session
                    .runner()
                    .feed("encoded_image_bytes", input)
                    .fetch("probabilities")
                    .run()
                    .get(0)
                    .expect(Float.class)) {
          if (probabilities == null) {
            probabilities = new float[(int) output.shape()[0]];
          }
          output.copyTo(probabilities);
          int label = argmax(probabilities);
          System.out.printf(
              "%-30s --> %-15s (%.2f%% likely)\n",
              filename, labels.get(label), probabilities[label] * 100.0);
        }
      }
    }
  }

  private static byte[] loadGraphDef() throws IOException {
    try (InputStream is = LabelImage.class.getClassLoader().getResourceAsStream("graph.pb")) {
      return ByteStreams.toByteArray(is);
    }
  }

  private static ArrayList<String> loadLabels() throws IOException {
    ArrayList<String> labels = new ArrayList<String>();
    String line;
    final InputStream is = LabelImage.class.getClassLoader().getResourceAsStream("labels.txt");
    try (BufferedReader reader = new BufferedReader(new InputStreamReader(is))) {
      while ((line = reader.readLine()) != null) {
        labels.add(line);
      }
    }
    return labels;
  }

  private static int argmax(float[] probabilities) {
    int best = 0;
    for (int i = 1; i < probabilities.length; ++i) {
      if (probabilities[i] > probabilities[best]) {
        best = i;
      }
    }
    return best;
  }
}