".github/git@developer.sourcefind.cn:change/sglang.git" did not exist on "480d1b8b203ad54712eaf65d7e5cd5e74c8b836a"
Commit 87a8abd4 authored by Ivan Bogatyy, committed by calberti

Sync SyntaxNet with TensorFlow r1.0 (#1062)

* Sync SyntaxNet with TensorFlow r1.0

* Fix typo back

* Fix Dockerfile to match TensorFlow 1.0
parent afb2291b
@@ -15,7 +15,7 @@ RUN mkdir -p $SYNTAXNETDIR \
&& ./bazel-0.4.3-installer-linux-x86_64.sh --user \
&& git clone --recursive https://github.com/tensorflow/models.git \
&& cd $SYNTAXNETDIR/models/syntaxnet/tensorflow \
&& echo -e "\n\n\n\n\n\n" | ./configure \
&& echo -e "\n\n\n\n\n\n\n\n\n" | ./configure \
&& apt-get autoremove -y \
&& apt-get clean
@@ -628,9 +628,11 @@ Original authors of the code in this package include (in alphabetical order):
* David Weiss
* Emily Pitler
* Greg Coppola
+* Ivan Bogatyy
* Ji Ma
* Keith Hall
* Kuzman Ganchev
+* Livio Baldini Soares
* Michael Collins
* Michael Ringgaard
* Ryan McDonald
@@ -4,8 +4,10 @@ local_repository(
)
load("@org_tensorflow//tensorflow:workspace.bzl", "tf_workspace")
-tf_workspace()
+tf_workspace(path_prefix="", tf_repo_name="org_tensorflow")
-# Specify the minimum required Bazel version.
-load("@org_tensorflow//tensorflow:tensorflow.bzl", "check_version")
+# Test that Bazel is the correct release, tested for compatibility with the
+# current SyntaxNet snapshot.
+load("//syntaxnet:syntaxnet.bzl", "check_version")
check_version("0.4.3")
@@ -69,7 +69,7 @@ def EmbeddingLookupFeatures(params, sparse_features, allow_weights):
if allow_weights:
# Multiply by weights, reshaping to allow broadcast.
-broadcast_weights_shape = tf.concat_v2([tf.shape(weights), [1]], 0)
+broadcast_weights_shape = tf.concat([tf.shape(weights), [1]], 0)
embeddings *= tf.reshape(weights, broadcast_weights_shape)
# Sum embeddings by index.
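In TensorFlow r1.0 the transitional tf.concat_v2 was folded back into tf.concat, which now takes (values, axis) rather than the old (axis, values) order, so only the function name changes at this call site. A minimal sketch of the same broadcast-reshape pattern under r1.0, with illustrative tensor values that are not from this file:

    import tensorflow as tf

    weights = tf.constant([0.5, 2.0])    # per-feature weights, shape [2]
    embeddings = tf.ones([2, 3])         # looked-up embeddings, shape [2, 3]
    # Append a trailing 1 to the weights shape so the weights broadcast over
    # the embedding dimension, as in concat([tf.shape(weights), [1]], 0) above.
    broadcast_weights_shape = tf.concat([tf.shape(weights), [1]], 0)
    scaled = embeddings * tf.reshape(weights, broadcast_weights_shape)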
@@ -251,7 +251,7 @@ class GreedyParser(object):
self._averaging[name + '_avg_update'] = ema.apply([param])
self.variables[name + '_avg_var'] = ema.average(param)
self.inits[name + '_avg_init'] = state_ops.init_variable(
-ema.average(param), tf.zeros_initializer)
+ema.average(param), tf.zeros_initializer())
return (self.variables[name + '_avg_var'] if return_average else
self.params[name])
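The added parentheses in this hunk and the similar ones below reflect that tf.zeros_initializer became an initializer class in TensorFlow r1.0, so it must be instantiated instead of being passed as a bare function. A small sketch under that assumption, with a hypothetical variable name:

    import tensorflow as tf

    # TF r1.0: tf.zeros_initializer() returns an initializer object.
    bias = tf.get_variable('example_bias', shape=[64], dtype=tf.float32,
                           initializer=tf.zeros_initializer())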
@@ -330,7 +330,7 @@ class GreedyParser(object):
i,
return_average=return_average))
-last_layer = tf.concat_v2(embeddings, 1)
+last_layer = tf.concat(embeddings, 1)
last_layer_size = self.embedding_size
# Create ReLU layers.
@@ -364,7 +364,7 @@ class GreedyParser(object):
[self._num_actions],
tf.float32,
'softmax_bias',
-tf.zeros_initializer,
+tf.zeros_initializer(),
return_average=return_average)
logits = tf.nn.xw_plus_b(last_layer,
softmax_weight,
@@ -530,7 +530,7 @@ class GreedyParser(object):
for param in trainable_params:
slot = optimizer.get_slot(param, 'momentum')
self.inits[slot.name] = state_ops.init_variable(slot,
-tf.zeros_initializer)
+tf.zeros_initializer())
self.variables[slot.name] = slot
numerical_checks = [
tf.check_numerics(param,
@@ -37,7 +37,7 @@ def AddCrossEntropy(batch_size, n):
for beam_id in range(batch_size):
beam_gold_slot = tf.reshape(
-tf.strided_slice(n['gold_slot'], [beam_id], [beam_id + 1], [1]), [1])
+tf.strided_slice(n['gold_slot'], [beam_id], [beam_id + 1]), [1])
def _ComputeCrossEntropy():
"""Adds ops to compute cross entropy of the gold path in a beam."""
# Requires a cast so that UnsortedSegmentSum, in the gradient,
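Dropping the trailing [1] works because TensorFlow r1.0 made the strides argument of tf.strided_slice optional, defaulting to a stride of one in every dimension. A rough, self-contained example of slicing out a single beam's gold slot, using made-up values:

    import tensorflow as tf

    gold_slot = tf.constant([3, 0, 2, 1])   # one gold slot per beam (illustrative)
    beam_id = 2
    # With no explicit strides, a stride of 1 is assumed for the single dimension.
    beam_gold_slot = tf.reshape(
        tf.strided_slice(gold_slot, [beam_id], [beam_id + 1]), [1])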
@@ -144,7 +144,7 @@ class StructuredGraphBuilder(graph_builder.GreedyParser):
n = self.training
n['accumulated_alive_steps'] = self._AddVariable(
[batch_size], tf.int32, 'accumulated_alive_steps',
-tf.zeros_initializer)
+tf.zeros_initializer())
n.update(self._AddBeamReader(task_context, batch_size, corpus_name))
# This adds a required 'step' node too:
learning_rate = tf.constant(learning_rate, dtype=tf.float32)
@@ -200,7 +200,7 @@ class StructuredGraphBuilder(graph_builder.GreedyParser):
for param in trainable_params.values():
slot = optimizer.get_slot(param, 'momentum')
self.inits[slot.name] = state_ops.init_variable(slot,
-tf.zeros_initializer)
+tf.zeros_initializer())
self.variables[slot.name] = slot
def NumericalChecks():
@@ -16,6 +16,15 @@
load("@protobuf//:protobuf.bzl", "cc_proto_library")
load("@protobuf//:protobuf.bzl", "py_proto_library")
+def check_version(expected_version):
+  current_version = native.bazel_version.split(" ")[0].split("-")[0]
+  if current_version != expected_version:
+    fail("\nCurrent Bazel version is {}, expected {}.\n".format(
+        current_version, expected_version) +
+        "To try anyway, remove check_version() call from syntaxnet/WORKSPACE")
+  else:
+    print("Bazel OK")
def if_cuda(if_true, if_false = []):
"""Shorthand for select()'ing on whether we're building with CUDA."""
return select({
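The check_version macro added above pins the Bazel release: native.bazel_version may carry a space- or dash-separated suffix (a local build tag, for example), and the two splits strip it before comparing against the version named in the WORKSPACE. A plain-Python illustration of that parsing, with a hypothetical version string:

    reported = "0.4.3- (@non-git)"   # stand-in for native.bazel_version
    current = reported.split(" ")[0].split("-")[0]
    assert current == "0.4.3"        # the value passed to check_version() in the WORKSPACE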
-Subproject commit 45ab528211c962b19e12f6b77165848310271624
+Subproject commit 950db43bfce2110767d832138438f266732adca6