Unverified Commit 00fa8b12 authored by cclauss, committed by GitHub

Merge branch 'master' into patch-13

parents 6d257a4f 1f34fcaf
@@ -22,6 +22,8 @@ columns.
lookup answer (or matrix) is also split into number and word lookup matrix
Author: aneelakantan (Arvind Neelakantan)
"""
+from __future__ import print_function
import math
import os
import re
@@ -56,7 +58,7 @@ def correct_unicode(string):
#string = re.sub("[“”«»]", "\"", string)
#string = re.sub("[•†‡]", "", string)
#string = re.sub("[‐‑–—]", "-", string)
-string = re.sub(ur'[\u2E00-\uFFFF]', "", string)
+string = re.sub(r'[\u2E00-\uFFFF]', "", string)
string = re.sub("\\s+", " ", string).strip()
return string
@@ -78,7 +80,7 @@ def full_normalize(string):
# Remove trailing info in brackets
string = re.sub("\[[^\]]*\]", "", string)
# Remove most unicode characters in other languages
-string = re.sub(ur'[\u007F-\uFFFF]', "", string.strip())
+string = re.sub(r'[\u007F-\uFFFF]', "", string.strip())
# Remove trailing info in parenthesis
string = re.sub("\([^)]*\)$", "", string.strip())
string = final_normalize(string)
@@ -207,7 +209,7 @@ class WikiQuestionGenerator(object):
self.dev_loader = WikiQuestionLoader(dev_name, root_folder)
self.test_loader = WikiQuestionLoader(test_name, root_folder)
self.bad_examples = 0
-self.root_folder = root_folder
+self.root_folder = root_folder
self.data_folder = os.path.join(self.root_folder, "annotated/data")
self.annotated_examples = {}
self.annotated_tables = {}
@@ -298,7 +300,7 @@
question_id, question, target_canon, context)
self.annotated_tables[context] = []
counter += 1
print "Annotated examples loaded ", len(self.annotated_examples)
print("Annotated examples loaded ", len(self.annotated_examples))
f.close()
def is_number_column(self, a):
......
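These edits follow the usual Python 2→3 modernization: the `ur''` literal prefix no longer exists in Python 3, and `print` became a function. A minimal sketch of the resulting pattern (the function name here is illustrative, not from the file):

```python
from __future__ import print_function  # makes print() behave the same on Python 2
import re

def strip_high_unicode(s):
    # Python 3 has no ur'' prefix; a plain raw string works because
    # Python 3's re module interprets \uXXXX escapes in the pattern itself.
    return re.sub(r'[\u2E00-\uFFFF]', "", s)

print("cleaned:", strip_high_unicode(u"abc\u2E80def"))  # -> cleaned: abcdef
```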
@@ -20,6 +20,7 @@ import sys
import time
import numpy as np
+from six.moves import xrange
import tensorflow as tf
import model as cross_conv_model
......
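The `six.moves.xrange` import added in this and the following files is the standard compatibility shim: it resolves to the built-in `range` on Python 3 and to `xrange` on Python 2, so existing loops keep their lazy-iteration behavior on both. A standalone illustration:

```python
from six.moves import xrange  # range on Python 3, xrange on Python 2

total = 0
for i in xrange(5):  # lazy sequence on both major versions
    total += i
print(total)  # 10
```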
@@ -18,6 +18,7 @@ import random
import sys
import numpy as np
+from six.moves import xrange
import tensorflow as tf
......
@@ -20,6 +20,7 @@ https://arxiv.org/pdf/1607.02586v1.pdf
import math
import sys
+from six.moves import xrange
import tensorflow as tf
slim = tf.contrib.slim
......
@@ -15,6 +15,7 @@
"""Read image sequence."""
+from six.moves import xrange
import tensorflow as tf
......
@@ -21,6 +21,7 @@ import sys
import numpy as np
import scipy.misc
+from six.moves import xrange
import tensorflow as tf
......
@@ -96,7 +96,7 @@ def main(_):
tf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame(
image_annotations, label_map, encoded_image)
if tf_example:
-shard_idx = long(image_id, 16) % FLAGS.num_shards
+shard_idx = int(image_id, 16) % FLAGS.num_shards
output_tfrecords[shard_idx].write(tf_example.SerializeToString())
......
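Python 3 removed the separate `long` type, and `int` already handles arbitrarily large values, so `int(image_id, 16)` is the portable spelling of this hex-based sharding. A self-contained sketch with stand-in values (the real `image_id` and `FLAGS.num_shards` come from the surrounding script):

```python
num_shards = 10  # stand-in for FLAGS.num_shards
image_id = "00fa8b12"  # hypothetical hex identifier
shard_idx = int(image_id, 16) % num_shards  # int() parses hex of any size
print(shard_idx)
```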
@@ -160,8 +160,6 @@ def dict_to_tf_example(data,
if not faces_only:
mask_remapped = mask_np != 2
masks.append(mask_remapped)
-mask_stack = np.stack(masks).astype(np.float32)
-masks_flattened = np.reshape(mask_stack, [-1])
feature_dict = {
'image/height': dataset_util.int64_feature(height),
@@ -184,8 +182,11 @@ def dict_to_tf_example(data,
'image/object/view': dataset_util.bytes_list_feature(poses),
}
if not faces_only:
+mask_stack = np.stack(masks).astype(np.float32)
+masks_flattened = np.reshape(mask_stack, [-1])
feature_dict['image/object/mask'] = (
dataset_util.float_list_feature(masks_flattened.tolist()))
example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
return example
......
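The relocation above appears to matter because `np.stack` raises `ValueError` on an empty list: when `faces_only` is true no masks are collected, so the stack/reshape must happen inside the guard. A minimal sketch under that assumption:

```python
import numpy as np

faces_only = True
masks = []  # stays empty when faces_only is True
feature_dict = {}
if not faces_only:
    # Safe: this branch only runs when masks were actually appended.
    mask_stack = np.stack(masks).astype(np.float32)
    masks_flattened = np.reshape(mask_stack, [-1])
    feature_dict['image/object/mask'] = masks_flattened.tolist()
```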
@@ -18,6 +18,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+from six.moves import xrange
import tensorflow as tf
from object_detection.core import standard_fields
......
@@ -103,7 +103,10 @@ def main(unused_argv):
model_config = configs['model']
eval_config = configs['eval_config']
-input_config = configs['eval_input_config']
+if FLAGS.eval_training_data:
+  input_config = configs['train_input_config']
+else:
+  input_config = configs['eval_input_config']
model_fn = functools.partial(
model_builder.build,
......
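The new branch lets the evaluator reuse the training input when `--eval_training_data` is set. A sketch of the same selection logic with a stand-in `configs` dict (the real one is produced by the pipeline config utilities):

```python
configs = {  # stand-in for the parsed pipeline config
    'train_input_config': {'name': 'train_input'},
    'eval_input_config': {'name': 'eval_input'},
}
eval_training_data = True  # stand-in for FLAGS.eval_training_data

if eval_training_data:
    input_config = configs['train_input_config']
else:
    input_config = configs['eval_input_config']
print(input_config['name'])  # -> train_input
```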
@@ -69,7 +69,7 @@ def freeze_graph_with_def_protos(
if optimize_graph:
logging.info('Graph Rewriter optimizations enabled')
rewrite_options = rewriter_config_pb2.RewriterConfig(
-optimize_tensor_layout=True)
+layout_optimizer=rewriter_config_pb2.RewriterConfig.ON)
rewrite_options.optimizers.append('pruning')
rewrite_options.optimizers.append('constfold')
rewrite_options.optimizers.append('layout')
......
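This tracks a TensorFlow proto change: the boolean `optimize_tensor_layout` field was replaced by an enum-valued `layout_optimizer`. A hedged sketch of constructing the same options (assumes a TF 1.x install that ships `rewriter_config_pb2`):

```python
from tensorflow.core.protobuf import rewriter_config_pb2

rewrite_options = rewriter_config_pb2.RewriterConfig(
    layout_optimizer=rewriter_config_pb2.RewriterConfig.ON)
# optimizers is a repeated string field, so extend() appends in order.
rewrite_options.optimizers.extend(['pruning', 'constfold', 'layout'])
print(rewrite_options)
```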
@@ -8,7 +8,7 @@ graph proto. A checkpoint will typically consist of three files:
* model.ckpt-${CHECKPOINT_NUMBER}.meta
After you've identified a candidate checkpoint to export, run the following
-command from tensorflow/models/research/object_detection:
+command from tensorflow/models/research/:
``` bash
# From tensorflow/models/research/
......
@@ -42,7 +42,7 @@ job using GPUs. A sample YAML file is given below:
```
trainingInput:
runtimeVersion: "1.0"
runtimeVersion: "1.2"
scaleTier: CUSTOM
masterType: standard_gpu
workerCount: 9
@@ -71,6 +71,7 @@ following command:
``` bash
# From tensorflow/models/research/
gcloud ml-engine jobs submit training object_detection_`date +%s` \
+--runtime-version 1.2 \
--job-dir=gs://${TRAIN_DIR} \
--packages dist/object_detection-0.1.tar.gz,slim/dist/slim-0.1.tar.gz \
--module-name object_detection.train \
@@ -90,6 +91,8 @@ Google Cloud Storage.
Users can monitor the progress of their training job on the [ML Engine
Dashboard](https://console.cloud.google.com/mlengine/jobs).
+Note: This sample is supported for use with the 1.2 runtime version.
## Running an Evaluation Job on Cloud
Evaluation jobs run on a single machine, so it is not necessary to write a YAML
@@ -98,6 +101,7 @@ job:
``` bash
gcloud ml-engine jobs submit training object_detection_eval_`date +%s` \
+--runtime-version 1.2 \
--job-dir=gs://${TRAIN_DIR} \
--packages dist/object_detection-0.1.tar.gz,slim/dist/slim-0.1.tar.gz \
--module-name object_detection.eval \
......
@@ -81,7 +81,7 @@ Oxford-IIIT Pet dataset into TFRecords. Run the following commands from the
``` bash
# From tensorflow/models/research/
-python object_detection/create_pet_tf_record.py \
+python object_detection/dataset_tools/create_pet_tf_record.py \
--label_map_path=object_detection/data/pet_label_map.pbtxt \
--data_dir=`pwd` \
--output_dir=`pwd`
@@ -203,12 +203,15 @@ For running the training Cloud ML job, we'll configure the cluster to use 10
training jobs (1 master + 9 workers) and three parameter servers. The
configuration file can be found at `object_detection/samples/cloud/cloud.yml`.
+Note: This sample is supported for use with the 1.2 runtime version.
To start training, execute the following command from the
`tensorflow/models/research/` directory:
``` bash
# From tensorflow/models/research/
gcloud ml-engine jobs submit training `whoami`_object_detection_`date +%s` \
+--runtime-version 1.2 \
--job-dir=gs://${YOUR_GCS_BUCKET}/train \
--packages dist/object_detection-0.1.tar.gz,slim/dist/slim-0.1.tar.gz \
--module-name object_detection.train \
@@ -224,6 +227,7 @@ Once training has started, we can run an evaluation concurrently:
``` bash
# From tensorflow/models/research/
gcloud ml-engine jobs submit training `whoami`_object_detection_eval_`date +%s` \
+--runtime-version 1.2 \
--job-dir=gs://${YOUR_GCS_BUCKET}/train \
--packages dist/object_detection-0.1.tar.gz,slim/dist/slim-0.1.tar.gz \
--module-name object_detection.eval \
......
@@ -36,8 +36,8 @@
"from matplotlib import pyplot as plt\n",
"from PIL import Image\n",
"\n",
"if tf.__version__ != '1.4.0':\n",
" raise ImportError('Please upgrade your tensorflow installation to v1.4.0!')\n"
"if tf.__version__ < '1.4.0':\n",
" raise ImportError('Please upgrade your tensorflow installation to v1.4.* or later!')\n"
]
},
{
......
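Note that the relaxed check compares version strings lexicographically, which is fine for 1.4.x but would misorder e.g. `'1.10.0' < '1.4.0'`. A tuple-based comparison (a sketch, not part of this commit) avoids that:

```python
import tensorflow as tf

# Compare numeric components instead of raw strings.
major_minor = tuple(int(p) for p in tf.__version__.split('.')[:2])
if major_minor < (1, 4):
    raise ImportError(
        'Please upgrade your tensorflow installation to v1.4.* or later!')
```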
@@ -21,6 +21,7 @@ Example box operations that are supported:
"""
import numpy as np
+from six.moves import xrange
from object_detection.utils import np_box_list
from object_detection.utils import np_box_ops
......
@@ -203,9 +203,9 @@ def padded_one_hot_encoding(indices, depth, left_pad):
TODO: add runtime checks for depth and indices.
"""
-if depth < 0 or not isinstance(depth, (int, long) if six.PY2 else int):
+if depth < 0 or not isinstance(depth, six.integer_types):
raise ValueError('`depth` must be a non-negative integer.')
-if left_pad < 0 or not isinstance(left_pad, (int, long) if six.PY2 else int):
+if left_pad < 0 or not isinstance(left_pad, six.integer_types):
raise ValueError('`left_pad` must be a non-negative integer.')
if depth == 0:
return None
......
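`six.integer_types` is `(int, long)` on Python 2 and `(int,)` on Python 3, so it replaces the explicit `six.PY2` branch above. A standalone sketch of the validation pattern:

```python
import six

def validate_depth(depth):
    # Accepts int (and long on Python 2); rejects floats and negatives.
    if depth < 0 or not isinstance(depth, six.integer_types):
        raise ValueError('`depth` must be a non-negative integer.')

validate_depth(8)      # ok
# validate_depth(2.0)  # would raise ValueError
```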
@@ -145,7 +145,7 @@ class VisualizationUtilsTest(tf.test.TestCase):
for i in range(images_with_boxes_np.shape[0]):
img_name = 'image_' + str(i) + '.png'
output_file = os.path.join(self.get_temp_dir(), img_name)
-print 'Writing output image %d to %s' % (i, output_file)
+print('Writing output image %d to %s' % (i, output_file))
image_pil = Image.fromarray(images_with_boxes_np[i, ...])
image_pil.save(output_file)
......
@@ -67,20 +67,27 @@ python trainer.py --logtostderr --batch_size=25 --env=HalfCheetah-v1 \
--max_divergence=0.05 --value_opt=best_fit --critic_weight=0.0 \
```
-Run Mujoco task with Trust-PCL:
+To run a Mujoco task with Trust-PCL (off-policy), use the command below.
+It should work well across all environments, provided that you search
+sufficiently among
+(1) max_divergence (0.001, 0.0005, and 0.002 are good values),
+(2) rollout (1, 5, and 10 are good values), and
+(3) tf_seed (average over enough random seeds);
+a sweep sketch follows the command.
```
python trainer.py --logtostderr --batch_size=1 --env=HalfCheetah-v1 \
---validation_frequency=50 --rollout=10 --critic_weight=0.0 \
---gamma=0.995 --clip_norm=40 --learning_rate=0.002 \
---replay_buffer_freq=1 --replay_buffer_size=20000 \
---replay_buffer_alpha=0.1 --norecurrent --objective=pcl \
---max_step=100 --tau=0.0 --eviction=fifo --max_divergence=0.001 \
---internal_dim=64 --cutoff_agent=1000 \
---replay_batch_size=25 --nouse_online_batch --batch_by_steps \
---sample_from=target --value_opt=grad --value_hidden_layers=2 \
---update_eps_lambda --unify_episodes --clip_adv=1.0 \
---target_network_lag=0.99 --prioritize_by=step
+--validation_frequency=250 --rollout=1 --critic_weight=1.0 --gamma=0.995 \
+--clip_norm=40 --learning_rate=0.0001 --replay_buffer_freq=1 \
+--replay_buffer_size=5000 --replay_buffer_alpha=0.001 --norecurrent \
+--objective=pcl --max_step=10 --cutoff_agent=1000 --tau=0.0 --eviction=fifo \
+--max_divergence=0.001 --internal_dim=256 --replay_batch_size=64 \
+--nouse_online_batch --batch_by_steps --value_hidden_layers=2 \
+--update_eps_lambda --nounify_episodes --target_network_lag=0.99 \
+--sample_from=online --clip_adv=1 --prioritize_by=step --num_steps=1000000 \
+--noinput_prev_actions --use_target_values --tf_seed=57
```
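Since the text above recommends searching over `max_divergence`, `rollout`, and `tf_seed`, a small generator for that sweep might look like this (values taken from the suggestions above; the full base command is elided):

```python
import itertools

base = "python trainer.py --logtostderr --batch_size=1 --env=HalfCheetah-v1 ..."
for div, rollout, seed in itertools.product(
        [0.0005, 0.001, 0.002],   # max_divergence candidates
        [1, 5, 10],               # rollout candidates
        range(5)):                # several tf_seed values to average over
    print("%s --max_divergence=%g --rollout=%d --tf_seed=%d"
          % (base, div, rollout, seed))
```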
Run Mujoco task with PCL constraint trust region:
......