"git@developer.sourcefind.cn:OpenDAS/ollama.git" did not exist on "4e1ff6dcbbc61b0cd266885d50faaaf8c7f667de"
Commit 87a8abd4 authored by Ivan Bogatyy, committed by calberti
Browse files

Sync SyntaxNet with TensorFlow r1.0 (#1062)

* Sync SyntaxNet with TensorFlow r1.0

* Fix typo back

* Fix Dockerfile to match TensorFlow 1.0
parent afb2291b
...@@ -15,7 +15,7 @@ RUN mkdir -p $SYNTAXNETDIR \ ...@@ -15,7 +15,7 @@ RUN mkdir -p $SYNTAXNETDIR \
&& ./bazel-0.4.3-installer-linux-x86_64.sh --user \ && ./bazel-0.4.3-installer-linux-x86_64.sh --user \
&& git clone --recursive https://github.com/tensorflow/models.git \ && git clone --recursive https://github.com/tensorflow/models.git \
&& cd $SYNTAXNETDIR/models/syntaxnet/tensorflow \ && cd $SYNTAXNETDIR/models/syntaxnet/tensorflow \
&& echo -e "\n\n\n\n\n\n" | ./configure \ && echo -e "\n\n\n\n\n\n\n\n\n" | ./configure \
&& apt-get autoremove -y \ && apt-get autoremove -y \
&& apt-get clean && apt-get clean
......
...@@ -628,9 +628,11 @@ Original authors of the code in this package include (in alphabetical order): ...@@ -628,9 +628,11 @@ Original authors of the code in this package include (in alphabetical order):
* David Weiss * David Weiss
* Emily Pitler * Emily Pitler
* Greg Coppola * Greg Coppola
* Ivan Bogatyy
* Ji Ma * Ji Ma
* Keith Hall * Keith Hall
* Kuzman Ganchev * Kuzman Ganchev
* Livio Baldini Soares
* Michael Collins * Michael Collins
* Michael Ringgaard * Michael Ringgaard
* Ryan McDonald * Ryan McDonald
......
...@@ -4,8 +4,10 @@ local_repository( ...@@ -4,8 +4,10 @@ local_repository(
) )
load("@org_tensorflow//tensorflow:workspace.bzl", "tf_workspace") load("@org_tensorflow//tensorflow:workspace.bzl", "tf_workspace")
tf_workspace() tf_workspace(path_prefix="", tf_repo_name="org_tensorflow")
# Specify the minimum required Bazel version. # Test that Bazel is the correct release, tested for compatibility with the
load("@org_tensorflow//tensorflow:tensorflow.bzl", "check_version") # current SyntaxNet snapshot.
load("//syntaxnet:syntaxnet.bzl", "check_version")
check_version("0.4.3") check_version("0.4.3")
...@@ -69,7 +69,7 @@ def EmbeddingLookupFeatures(params, sparse_features, allow_weights): ...@@ -69,7 +69,7 @@ def EmbeddingLookupFeatures(params, sparse_features, allow_weights):
if allow_weights: if allow_weights:
# Multiply by weights, reshaping to allow broadcast. # Multiply by weights, reshaping to allow broadcast.
broadcast_weights_shape = tf.concat_v2([tf.shape(weights), [1]], 0) broadcast_weights_shape = tf.concat([tf.shape(weights), [1]], 0)
embeddings *= tf.reshape(weights, broadcast_weights_shape) embeddings *= tf.reshape(weights, broadcast_weights_shape)
# Sum embeddings by index. # Sum embeddings by index.
...@@ -251,7 +251,7 @@ class GreedyParser(object): ...@@ -251,7 +251,7 @@ class GreedyParser(object):
self._averaging[name + '_avg_update'] = ema.apply([param]) self._averaging[name + '_avg_update'] = ema.apply([param])
self.variables[name + '_avg_var'] = ema.average(param) self.variables[name + '_avg_var'] = ema.average(param)
self.inits[name + '_avg_init'] = state_ops.init_variable( self.inits[name + '_avg_init'] = state_ops.init_variable(
ema.average(param), tf.zeros_initializer) ema.average(param), tf.zeros_initializer())
return (self.variables[name + '_avg_var'] if return_average else return (self.variables[name + '_avg_var'] if return_average else
self.params[name]) self.params[name])
...@@ -330,7 +330,7 @@ class GreedyParser(object): ...@@ -330,7 +330,7 @@ class GreedyParser(object):
i, i,
return_average=return_average)) return_average=return_average))
last_layer = tf.concat_v2(embeddings, 1) last_layer = tf.concat(embeddings, 1)
last_layer_size = self.embedding_size last_layer_size = self.embedding_size
# Create ReLU layers. # Create ReLU layers.
...@@ -364,7 +364,7 @@ class GreedyParser(object): ...@@ -364,7 +364,7 @@ class GreedyParser(object):
[self._num_actions], [self._num_actions],
tf.float32, tf.float32,
'softmax_bias', 'softmax_bias',
tf.zeros_initializer, tf.zeros_initializer(),
return_average=return_average) return_average=return_average)
logits = tf.nn.xw_plus_b(last_layer, logits = tf.nn.xw_plus_b(last_layer,
softmax_weight, softmax_weight,
...@@ -530,7 +530,7 @@ class GreedyParser(object): ...@@ -530,7 +530,7 @@ class GreedyParser(object):
for param in trainable_params: for param in trainable_params:
slot = optimizer.get_slot(param, 'momentum') slot = optimizer.get_slot(param, 'momentum')
self.inits[slot.name] = state_ops.init_variable(slot, self.inits[slot.name] = state_ops.init_variable(slot,
tf.zeros_initializer) tf.zeros_initializer())
self.variables[slot.name] = slot self.variables[slot.name] = slot
numerical_checks = [ numerical_checks = [
tf.check_numerics(param, tf.check_numerics(param,
......
...@@ -37,7 +37,7 @@ def AddCrossEntropy(batch_size, n): ...@@ -37,7 +37,7 @@ def AddCrossEntropy(batch_size, n):
for beam_id in range(batch_size): for beam_id in range(batch_size):
beam_gold_slot = tf.reshape( beam_gold_slot = tf.reshape(
tf.strided_slice(n['gold_slot'], [beam_id], [beam_id + 1], [1]), [1]) tf.strided_slice(n['gold_slot'], [beam_id], [beam_id + 1]), [1])
def _ComputeCrossEntropy(): def _ComputeCrossEntropy():
"""Adds ops to compute cross entropy of the gold path in a beam.""" """Adds ops to compute cross entropy of the gold path in a beam."""
# Requires a cast so that UnsortedSegmentSum, in the gradient, # Requires a cast so that UnsortedSegmentSum, in the gradient,
...@@ -144,7 +144,7 @@ class StructuredGraphBuilder(graph_builder.GreedyParser): ...@@ -144,7 +144,7 @@ class StructuredGraphBuilder(graph_builder.GreedyParser):
n = self.training n = self.training
n['accumulated_alive_steps'] = self._AddVariable( n['accumulated_alive_steps'] = self._AddVariable(
[batch_size], tf.int32, 'accumulated_alive_steps', [batch_size], tf.int32, 'accumulated_alive_steps',
tf.zeros_initializer) tf.zeros_initializer())
n.update(self._AddBeamReader(task_context, batch_size, corpus_name)) n.update(self._AddBeamReader(task_context, batch_size, corpus_name))
# This adds a required 'step' node too: # This adds a required 'step' node too:
learning_rate = tf.constant(learning_rate, dtype=tf.float32) learning_rate = tf.constant(learning_rate, dtype=tf.float32)
...@@ -200,7 +200,7 @@ class StructuredGraphBuilder(graph_builder.GreedyParser): ...@@ -200,7 +200,7 @@ class StructuredGraphBuilder(graph_builder.GreedyParser):
for param in trainable_params.values(): for param in trainable_params.values():
slot = optimizer.get_slot(param, 'momentum') slot = optimizer.get_slot(param, 'momentum')
self.inits[slot.name] = state_ops.init_variable(slot, self.inits[slot.name] = state_ops.init_variable(slot,
tf.zeros_initializer) tf.zeros_initializer())
self.variables[slot.name] = slot self.variables[slot.name] = slot
def NumericalChecks(): def NumericalChecks():
......
...@@ -16,6 +16,15 @@ ...@@ -16,6 +16,15 @@
load("@protobuf//:protobuf.bzl", "cc_proto_library") load("@protobuf//:protobuf.bzl", "cc_proto_library")
load("@protobuf//:protobuf.bzl", "py_proto_library") load("@protobuf//:protobuf.bzl", "py_proto_library")
def check_version(expected_version):
  """Fail the build unless the running Bazel release equals expected_version.

  Args:
    expected_version: release string the current SyntaxNet snapshot was
        tested against (e.g. "0.4.3").
  """
  # native.bazel_version may carry a space-separated or dash-suffixed build
  # tag; keep only the leading release number for the comparison.
  actual = native.bazel_version.split(" ")[0].split("-")[0]
  if actual == expected_version:
    print("Bazel OK")
  else:
    fail("\nCurrent Bazel version is {}, expected {}.\n".format(
        actual, expected_version) +
        "To try anyway, remove check_version() call from syntaxnet/WORKSPACE")
def if_cuda(if_true, if_false = []): def if_cuda(if_true, if_false = []):
"""Shorthand for select()'ing on whether we're building with CUDA.""" """Shorthand for select()'ing on whether we're building with CUDA."""
return select({ return select({
......
Subproject commit 45ab528211c962b19e12f6b77165848310271624 Subproject commit 950db43bfce2110767d832138438f266732adca6
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment