Commit 3a0086fa authored by Zhilin Yang

init
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to average values of variables in a list of checkpoint files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import six
from six.moves import zip # pylint: disable=redefined-builtin
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS

flags.DEFINE_string("checkpoints", "",
                    "Comma-separated list of checkpoints to average.")
flags.DEFINE_integer("num_last_checkpoints", 0,
                     "Averages the last N saved checkpoints."
                     " If the checkpoints flag is set, this is ignored.")
flags.DEFINE_string("prefix", "",
                    "Prefix (e.g., directory) to append to each checkpoint.")
flags.DEFINE_string("output_path", "/tmp/averaged.ckpt",
                    "Path to output the averaged checkpoint to.")


def checkpoint_exists(path):
  return (tf.gfile.Exists(path) or tf.gfile.Exists(path + ".meta") or
          tf.gfile.Exists(path + ".index"))
def main(_):
  tf.logging.set_verbosity(tf.logging.INFO)

  if FLAGS.checkpoints:
    # Get the checkpoints list from flags and run some basic checks.
    checkpoints = [c.strip() for c in FLAGS.checkpoints.split(",")]
    checkpoints = [c for c in checkpoints if c]
    if not checkpoints:
      raise ValueError("No checkpoints provided for averaging.")
    if FLAGS.prefix:
      checkpoints = [FLAGS.prefix + c for c in checkpoints]
  else:
    assert FLAGS.num_last_checkpoints >= 1, "Must average at least one model"
    assert FLAGS.prefix, ("Prefix must be provided when averaging last"
                          " N checkpoints")
    checkpoint_state = tf.train.get_checkpoint_state(
        os.path.dirname(FLAGS.prefix))
    # Checkpoints are ordered from oldest to newest.
    checkpoints = checkpoint_state.all_model_checkpoint_paths[
        -FLAGS.num_last_checkpoints:]

  checkpoints = [c for c in checkpoints if checkpoint_exists(c)]
  if not checkpoints:
    if FLAGS.checkpoints:
      raise ValueError(
          "None of the provided checkpoints exist. %s" % FLAGS.checkpoints)
    else:
      raise ValueError("Could not find checkpoints at %s" %
                       os.path.dirname(FLAGS.prefix))

  # Read variables from all checkpoints and average them.
  tf.logging.info("Reading variables and averaging checkpoints:")
  for c in checkpoints:
    tf.logging.info("%s ", c)
  var_list = tf.contrib.framework.list_variables(checkpoints[0])
  var_values, var_dtypes = {}, {}
  for (name, shape) in var_list:
    if not name.startswith("global_step"):
      var_values[name] = np.zeros(shape)
  for checkpoint in checkpoints:
    reader = tf.contrib.framework.load_checkpoint(checkpoint)
    for name in var_values:
      tensor = reader.get_tensor(name)
      var_dtypes[name] = tensor.dtype
      var_values[name] += tensor
    tf.logging.info("Read from checkpoint %s", checkpoint)
  for name in var_values:  # Average.
    var_values[name] /= len(checkpoints)

  with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
    tf_vars = [
        tf.get_variable(v, shape=var_values[v].shape, dtype=var_dtypes[v])
        for v in var_values
    ]
  placeholders = [tf.placeholder(v.dtype, shape=v.shape) for v in tf_vars]
  assign_ops = [tf.assign(v, p) for (v, p) in zip(tf_vars, placeholders)]
  global_step = tf.Variable(
      0, name="global_step", trainable=False, dtype=tf.int64)
  saver = tf.train.Saver(tf.all_variables())

  # Build a model consisting only of variables, set them to the average values.
  with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    for p, assign_op, (name, value) in zip(placeholders, assign_ops,
                                           six.iteritems(var_values)):
      sess.run(assign_op, {p: value})
    # Use the built saver to save the averaged checkpoint.
    saver.save(sess, FLAGS.output_path, global_step=global_step)

  tf.logging.info("Averaged checkpoints saved in %s", FLAGS.output_path)


if __name__ == "__main__":
  tf.app.run()
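For reference, a typical invocation of the averaging script above, assuming it is saved as avg_checkpoints.py (the filename and paths here are illustrative). With --num_last_checkpoints, the script looks up the checkpoint state in the directory part of --prefix and averages the N most recent checkpoints found there:

python avg_checkpoints.py \
  --prefix=EXP-enwik8/ \
  --num_last_checkpoints=5 \
  --output_path=/tmp/averaged.ckpt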
import os

import tensorflow as tf


def assign_to_gpu(gpu=0, ps_dev="/device:CPU:0"):
  """Returns a device function placing ops on `gpu` and Variable ops on `ps_dev`."""

  def _assign(op):
    node_def = op if isinstance(op, tf.NodeDef) else op.node_def
    if node_def.op == "Variable":
      return ps_dev
    else:
      return "/gpu:%d" % gpu

  return _assign


def average_grads_and_vars(tower_grads_and_vars):
  """Averages a list of per-tower (gradient, variable) lists."""

  def average_dense(grad_and_vars):
    if len(grad_and_vars) == 1:
      return grad_and_vars[0][0]

    grad = grad_and_vars[0][0]
    for g, _ in grad_and_vars[1:]:
      grad += g
    return grad / len(grad_and_vars)

  def average_sparse(grad_and_vars):
    if len(grad_and_vars) == 1:
      return grad_and_vars[0][0]

    indices = []
    values = []
    for g, _ in grad_and_vars:
      indices += [g.indices]
      values += [g.values]
    indices = tf.concat(indices, 0)
    values = tf.concat(values, 0) / len(grad_and_vars)
    return tf.IndexedSlices(values, indices, grad_and_vars[0][0].dense_shape)

  average_grads_and_vars = []
  for grad_and_vars in zip(*tower_grads_and_vars):
    if grad_and_vars[0][0] is None:
      grad = None
    elif isinstance(grad_and_vars[0][0], tf.IndexedSlices):
      grad = average_sparse(grad_and_vars)
    else:
      grad = average_dense(grad_and_vars)

    # Keep in mind that the Variables are redundant because they are shared
    # across towers, so we just return the first tower's pointer to the
    # Variable.
    v = grad_and_vars[0][1]
    grad_and_var = (grad, v)
    average_grads_and_vars.append(grad_and_var)
  return average_grads_and_vars


def load_from_checkpoint(saver, logdir):
  """Restores the latest checkpoint in `logdir` into the default session."""
  sess = tf.get_default_session()
  ckpt = tf.train.get_checkpoint_state(logdir)
  if ckpt and ckpt.model_checkpoint_path:
    if os.path.isabs(ckpt.model_checkpoint_path):
      # Restores from checkpoint with absolute path.
      saver.restore(sess, ckpt.model_checkpoint_path)
    else:
      # Restores from checkpoint with relative path.
      saver.restore(sess, os.path.join(logdir, ckpt.model_checkpoint_path))
    return True
  return False
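A minimal sketch of how these helpers combine in a TF 1.x multi-tower training graph; the tower count, tensor shapes, and optimizer below are made up for illustration:

import tensorflow as tf

opt = tf.train.GradientDescentOptimizer(0.1)
tower_grads_and_vars = []
for i in range(2):  # two GPU towers sharing one set of variables
  # assign_to_gpu routes ops to /gpu:i (ops matching "Variable" go to ps_dev).
  with tf.device(assign_to_gpu(i)), tf.variable_scope("model", reuse=(i > 0)):
    x = tf.random_normal([8, 16])
    w = tf.get_variable("w", [16, 1])
    loss = tf.reduce_mean(tf.square(tf.matmul(x, w)))
    tower_grads_and_vars.append(opt.compute_gradients(loss))

# Average each variable's gradient across towers, then apply once.
train_op = opt.apply_gradients(average_grads_and_vars(tower_grads_and_vars))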
#!/bin/bash
# Data
DATA_ROOT=../data/enwik8/
# Model
N_LAYER=24
D_MODEL=1024
D_EMBED=1024
N_HEAD=8
D_HEAD=128
D_INNER=3072
# Training
TGT_LEN=256
MEM_LEN=256
BSZ=16
NUM_CORE=2
# Testing
TEST_TGT_LEN=128
TEST_MEM_LEN=3800
TEST_CLAMP_LEN=1000
TEST_BSZ=16
TEST_NUM_CORE=4
if [[ $1 == 'train_data' ]]; then
python data_utils.py \
--data_dir=${DATA_ROOT}/ \
--dataset=enwik8 \
--tgt_len=${TGT_LEN} \
--per_host_train_bsz=${BSZ} \
--per_host_valid_bsz=${BSZ} \
--num_passes=1 \
--use_tpu=False \
${@:2}
elif [[ $1 == 'test_data' ]]; then
python data_utils.py \
--data_dir=${DATA_ROOT}/ \
--dataset=enwik8 \
--tgt_len=${TEST_TGT_LEN} \
--per_host_test_bsz=${TEST_BSZ} \
--num_passes=1 \
--use_tpu=False \
${@:2}
elif [[ $1 == 'train' ]]; then
echo 'Run training...'
python train_gpu.py \
--data_dir=${DATA_ROOT}/tfrecords \
--record_info_dir=${DATA_ROOT}/tfrecords/ \
--corpus_info_path=${DATA_ROOT}/corpus-info.json \
--model_dir=EXP-enwik8 \
--n_layer=${N_LAYER} \
--d_model=${D_MODEL} \
--d_embed=${D_EMBED} \
--n_head=${N_HEAD} \
--d_head=${D_HEAD} \
--d_inner=${D_INNER} \
--dropout=0.1 \
--dropatt=0.0 \
--learning_rate=0.00025 \
--warmup_steps=0 \
--train_steps=400000 \
--tgt_len=${TGT_LEN} \
--mem_len=${MEM_LEN} \
--train_batch_size=${BSZ} \
--num_core_per_host=${NUM_CORE} \
--iterations=200 \
--save_steps=200 \
--do_train=True \
--do_eval=False \
${@:2}
elif [[ $1 == 'eval' ]]; then
echo 'Run evaluation...'
python train_gpu.py \
--data_dir=${DATA_ROOT}/tfrecords \
--record_info_dir=${DATA_ROOT}/tfrecords/ \
--corpus_info_path=${DATA_ROOT}/corpus-info.json \
--model_dir=EXP-enwik8 \
--n_layer=${N_LAYER} \
--d_model=${D_MODEL} \
--d_embed=${D_EMBED} \
--n_head=${N_HEAD} \
--d_head=${D_HEAD} \
--d_inner=${D_INNER} \
--dropout=0.0 \
--dropatt=0.0 \
--tgt_len=${TEST_TGT_LEN} \
--mem_len=${TEST_MEM_LEN} \
--clamp_len=${TEST_CLAMP_LEN} \
--same_length=True \
--eval_batch_size=${TEST_BSZ} \
--num_core_per_host=${TEST_NUM_CORE} \
--do_train=False \
--do_eval=True \
--eval_split=test \
${@:2}
else
echo "unknown argument: $1"
fi
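Assuming the script above is saved as enwik8_base_gpu.sh (filename hypothetical), the intended order of operations is:

bash enwik8_base_gpu.sh train_data   # build TFRecords for the train/valid splits
bash enwik8_base_gpu.sh train
bash enwik8_base_gpu.sh test_data    # build TFRecords for the test split
bash enwik8_base_gpu.sh eval

Any extra flags after the first argument are forwarded to the underlying Python script via ${@:2}, e.g. bash enwik8_base_gpu.sh train --train_steps=100000. The lm1b and text8 scripts below follow the same pattern.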
#!/bin/bash
# Path
LOCAL_DIR=../data/enwik8/
GSDATA=
GSEXP=
# TPU setting
NUM_HOST=2
NUM_CORE=16 # TPUv2 -> 8 | TPUv3 -> 16
TEST_NUM_HOST=1
TEST_NUM_CORE=8 # TPUv2 -> 8 | TPUv3 -> 16
# Model
N_LAYER=24
D_MODEL=1024
D_EMBED=1024
N_HEAD=8
D_HEAD=128
D_INNER=3072
# Training
TGT_LEN=768
MEM_LEN=768
TRAIN_BSZ=64
VALID_BSZ=64
# Testing
TEST_TGT_LEN=128
TEST_MEM_LEN=3800
TEST_CLAMP_LEN=1000
TEST_BSZ=16
if [[ $1 == 'train_data' ]]; then
python data_utils.py \
--data_dir=${LOCAL_DIR}/ \
--dataset=enwik8 \
--tgt_len=${TGT_LEN} \
--per_host_train_bsz=${TRAIN_BSZ} \
--per_host_valid_bsz=${VALID_BSZ} \
--num_core_per_host=${NUM_CORE} \
--num_passes=10 \
--use_tpu=True \
${@:2}
SRC_PATTERN=train.bsz-${TRAIN_BSZ}.tlen-${TGT_LEN}.core-${NUM_CORE}*
gsutil cp ${LOCAL_DIR}/tfrecords/${SRC_PATTERN} ${GSDATA}/enwik8-tfrecords/
SRC_PATTERN=valid.bsz-${VALID_BSZ}.tlen-${TGT_LEN}.core-${NUM_CORE}*
gsutil cp ${LOCAL_DIR}/tfrecords/${SRC_PATTERN} ${GSDATA}/enwik8-tfrecords/
elif [[ $1 == 'test_data' ]]; then
python data_utils.py \
--data_dir=${LOCAL_DIR}/ \
--dataset=enwik8 \
--tgt_len=${TEST_TGT_LEN} \
--per_host_test_bsz=${TEST_BSZ} \
--num_core_per_host=${TEST_NUM_CORE} \
--num_passes=1 \
--use_tpu=True \
${@:2}
SRC_PATTERN=test.bsz-${TEST_BSZ}.tlen-${TEST_TGT_LEN}.core-${TEST_NUM_CORE}*
gsutil cp ${LOCAL_DIR}/tfrecords/${SRC_PATTERN} ${GSDATA}/enwik8-tfrecords/
elif [[ $1 == 'train' ]]; then
echo 'Run training...'
python train.py \
--data_dir=${GSDATA}/enwik8-tfrecords \
--record_info_dir=${LOCAL_DIR}/tfrecords/ \
--corpus_info_path=${LOCAL_DIR}/corpus-info.json \
--model_dir=${GSEXP}/enwik8 \
--n_layer=${N_LAYER} \
--d_model=${D_MODEL} \
--d_embed=${D_EMBED} \
--n_head=${N_HEAD} \
--d_head=${D_HEAD} \
--d_inner=${D_INNER} \
--dropout=0.15 \
--dropatt=0.15 \
--learning_rate=0.00025 \
--warmup_steps=4000 \
--train_steps=400000 \
--tgt_len=${TGT_LEN} \
--mem_len=${MEM_LEN} \
--train_batch_size=${TRAIN_BSZ} \
--use_tpu=True \
--num_host=${NUM_HOST} \
--num_core_per_host=${NUM_CORE} \
--iterations=1000 \
--save_steps=10000 \
--do_train=True \
--do_eval=False \
${@:2}
elif [[ $1 == 'eval' ]]; then
echo 'Run evaluation...'
python train.py \
--data_dir=${GSDATA}/enwik8-tfrecords \
--record_info_dir=${LOCAL_DIR}/tfrecords/ \
--corpus_info_path=${LOCAL_DIR}/corpus-info.json \
--model_dir=${GSEXP}/enwik8 \
--n_layer=${N_LAYER} \
--d_model=${D_MODEL} \
--d_embed=${D_EMBED} \
--n_head=${N_HEAD} \
--d_head=${D_HEAD} \
--d_inner=${D_INNER} \
--tgt_len=${TEST_TGT_LEN} \
--mem_len=${TEST_MEM_LEN} \
--eval_batch_size=${TEST_BSZ} \
--num_host=${TEST_NUM_HOST} \
--num_core_per_host=${TEST_NUM_CORE} \
--use_tpu=True \
--do_train=False \
--do_eval_only=True \
--eval_split=test \
${@:2}
else
echo "unknown argument: $1"
fi
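GSDATA and GSEXP above are intentionally left blank: fill them in with your own Cloud Storage paths (e.g. gs://your-bucket/...) before running. The data branches preprocess locally and then gsutil-copy the TFRecords to ${GSDATA}, so a typical sequence is (filename hypothetical):

bash enwik8_large_tpu.sh train_data
bash enwik8_large_tpu.sh train
bash enwik8_large_tpu.sh test_data
bash enwik8_large_tpu.sh eval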
#!/bin/bash
# Data
DATA_ROOT=../data/one-billion-words/
# Model
DIV_VAL=4
N_LAYER=24
D_MODEL=1280
D_EMBED=1280
N_HEAD=16
D_HEAD=80
D_INNER=8192
# Training
TGT_LEN=256
MEM_LEN=256
BSZ=16
NUM_CORE=2
# Testing
TEST_TGT_LEN=32
TEST_MEM_LEN=128
TEST_CLAMP_LEN=-1
TEST_BSZ=16
TEST_NUM_CORE=1
if [[ $1 == 'train_data' ]]; then
python data_utils.py \
--data_dir=${DATA_ROOT}/ \
--dataset=lm1b \
--tgt_len=${TGT_LEN} \
--per_host_train_bsz=${BSZ} \
--per_host_valid_bsz=${BSZ} \
--num_passes=1 \
--use_tpu=False \
${@:2}
elif [[ $1 == 'test_data' ]]; then
python data_utils.py \
--data_dir=${DATA_ROOT}/ \
--dataset=lm1b \
--tgt_len=${TEST_TGT_LEN} \
--per_host_test_bsz=${TEST_BSZ} \
--num_passes=1 \
--use_tpu=False \
${@:2}
elif [[ $1 == 'train' ]]; then
echo 'Run training...'
python train_gpu.py \
--data_dir=${DATA_ROOT}/tfrecords \
--record_info_dir=${DATA_ROOT}/tfrecords/ \
--corpus_info_path=${DATA_ROOT}/corpus-info.json \
--model_dir=EXP-lm1b \
--div_val=${DIV_VAL} \
--untie_r=True \
--proj_share_all_but_first=False \
--proj_same_dim=False \
--n_layer=${N_LAYER} \
--d_model=${D_MODEL} \
--d_embed=${D_EMBED} \
--n_head=${N_HEAD} \
--d_head=${D_HEAD} \
--d_inner=${D_INNER} \
--dropout=0.1 \
--dropatt=0.0 \
--learning_rate=0.00025 \
--warmup_steps=0 \
--train_steps=400000 \
--tgt_len=${TGT_LEN} \
--mem_len=${MEM_LEN} \
--train_batch_size=${BSZ} \
--num_core_per_host=${NUM_CORE} \
--iterations=200 \
--save_steps=4000 \
${@:2}
elif [[ $1 == 'eval' ]]; then
echo 'Run evaluation...'
python train_gpu.py \
--data_dir=${DATA_ROOT}/tfrecords \
--record_info_dir=${DATA_ROOT}/tfrecords/ \
--corpus_info_path=${DATA_ROOT}/corpus-info.json \
--model_dir=EXP-lm1b \
--div_val=${DIV_VAL} \
--untie_r=True \
--proj_share_all_but_first=False \
--proj_same_dim=False \
--n_layer=${N_LAYER} \
--d_model=${D_MODEL} \
--d_embed=${D_EMBED} \
--n_head=${N_HEAD} \
--d_head=${D_HEAD} \
--d_inner=${D_INNER} \
--dropout=0.0 \
--dropatt=0.0 \
--tgt_len=${TEST_TGT_LEN} \
--mem_len=${TEST_MEM_LEN} \
--clamp_len=${TEST_CLAMP_LEN} \
--same_length=True \
--eval_batch_size=${TEST_BSZ} \
--num_core_per_host=${TEST_NUM_CORE} \
--do_train=False \
--do_eval=True \
--eval_split=test \
${@:2}
else
echo "unknown argument: $1"
fi
#!/bin/bash
# Path
LOCAL_DIR=../data/one-billion-words/
GSDATA=
GSEXP=
# TPU setting
NUM_HOST=32
NUM_CORE=16 # TPUv2 -> 8 | TPUv3 -> 16
TEST_NUM_HOST=1
TEST_NUM_CORE=8 # TPUv2 -> 8 | TPUv3 -> 16
# Model
DIV_VAL=4
N_LAYER=24
D_MODEL=1280
D_EMBED=1280
N_HEAD=16
D_HEAD=80
D_INNER=8192
# Training
TGT_LEN=32
MEM_LEN=32
TRAIN_BSZ=512
VALID_BSZ=512
# Testing
TEST_TGT_LEN=32
TEST_MEM_LEN=128
TEST_CLAMP_LEN=-1
TEST_BSZ=8
if [[ $1 == 'train_data' ]]; then
python data_utils.py \
--data_dir=${LOCAL_DIR}/ \
--dataset=lm1b \
--tgt_len=${TGT_LEN} \
--per_host_train_bsz=${TRAIN_BSZ} \
--per_host_valid_bsz=${VALID_BSZ} \
--num_core_per_host=${NUM_CORE} \
--num_passes=10 \
--use_tpu=True \
${@:2}
SRC_PATTERN=train.bsz-${TRAIN_BSZ}.tlen-${TGT_LEN}.core-${NUM_CORE}*
gsutil cp ${LOCAL_DIR}/tfrecords/${SRC_PATTERN} ${GSDATA}/lm1b-tfrecords/
SRC_PATTERN=valid.bsz-${VALID_BSZ}.tlen-${TGT_LEN}.core-${NUM_CORE}*
gsutil cp ${LOCAL_DIR}/tfrecords/${SRC_PATTERN} ${GSDATA}/lm1b-tfrecords/
elif [[ $1 == 'test_data' ]]; then
python data_utils.py \
--data_dir=${LOCAL_DIR}/ \
--dataset=lm1b \
--tgt_len=${TEST_TGT_LEN} \
--per_host_test_bsz=${TEST_BSZ} \
--num_core_per_host=${TEST_NUM_CORE} \
--num_passes=1 \
--use_tpu=True \
${@:2}
SRC_PATTERN=test.bsz-${TEST_BSZ}.tlen-${TEST_TGT_LEN}.core-${TEST_NUM_CORE}*
gsutil cp ${LOCAL_DIR}/tfrecords/${SRC_PATTERN} ${GSDATA}/lm1b-tfrecords/
elif [[ $1 == 'train' ]]; then
echo 'Run training...'
python train.py \
--data_dir=${GSDATA}/lm1b-tfrecords \
--record_info_dir=${LOCAL_DIR}/tfrecords/ \
--corpus_info_path=${LOCAL_DIR}/corpus-info.json \
--model_dir=${GSEXP}/lm1b \
--div_val=${DIV_VAL} \
--untie_r=True \
--proj_share_all_but_first=False \
--proj_same_dim=False \
--n_layer=${N_LAYER} \
--d_model=${D_MODEL} \
--d_embed=${D_EMBED} \
--n_head=${N_HEAD} \
--d_head=${D_HEAD} \
--d_inner=${D_INNER} \
--dropout=0.05 \
--dropatt=0.05 \
--init_std=0.005 \
--learning_rate=0.0001 \
--warmup_steps=30000 \
--train_steps=1200000 \
--tgt_len=${TGT_LEN} \
--mem_len=${MEM_LEN} \
--train_batch_size=${TRAIN_BSZ} \
--num_host=${NUM_HOST} \
--num_core_per_host=${NUM_CORE} \
--iterations=1000 \
--save_steps=10000 \
--use_tpu=True \
--do_eval=False \
${@:2}
elif [[ $1 == 'eval' ]]; then
echo 'Run evaluation...'
python train.py \
--data_dir=${GSDATA}/lm1b-tfrecords \
--record_info_dir=${LOCAL_DIR}/tfrecords/ \
--corpus_info_path=${LOCAL_DIR}/corpus-info.json \
--model_dir=${GSEXP}/lm1b \
--div_val=${DIV_VAL} \
--untie_r=True \
--proj_share_all_but_first=False \
--proj_same_dim=False \
--n_layer=${N_LAYER} \
--d_model=${D_MODEL} \
--d_embed=${D_EMBED} \
--n_head=${N_HEAD} \
--d_head=${D_HEAD} \
--d_inner=${D_INNER} \
--tgt_len=${TEST_TGT_LEN} \
--mem_len=${TEST_MEM_LEN} \
--clamp_len=${TEST_CLAMP_LEN} \
--same_length=True \
--eval_batch_size=${TEST_BSZ} \
--num_host=${TEST_NUM_HOST} \
--num_core_per_host=${TEST_NUM_CORE} \
--use_tpu=True \
--do_train=False \
--do_eval_only=True \
--eval_split=test \
${@:2}
else
echo "unknown argument: $1"
fi
#!/bin/bash
# Data
DATA_ROOT=../data/text8/
# Model
N_LAYER=24
D_MODEL=1024
D_EMBED=1024
N_HEAD=8
D_HEAD=128
D_INNER=3072
# Training
TGT_LEN=256
MEM_LEN=256
BSZ=16
NUM_CORE=2
# Testing
TEST_TGT_LEN=128
TEST_MEM_LEN=3800
TEST_CLAMP_LEN=1000
TEST_BSZ=8
TEST_NUM_CORE=2
if [[ $1 == 'train_data' ]]; then
python data_utils.py \
--data_dir=${DATA_ROOT}/ \
--dataset=text8 \
--tgt_len=${TGT_LEN} \
--per_host_train_bsz=${BSZ} \
--per_host_valid_bsz=${BSZ} \
--num_passes=1 \
--use_tpu=False \
${@:2}
elif [[ $1 == 'test_data' ]]; then
python data_utils.py \
--data_dir=${DATA_ROOT}/ \
--dataset=text8 \
--tgt_len=${TEST_TGT_LEN} \
--per_host_test_bsz=${TEST_BSZ} \
--num_passes=1 \
--use_tpu=False \
${@:2}
elif [[ $1 == 'train' ]]; then
echo 'Run training...'
python train_gpu.py \
--data_dir=${DATA_ROOT}/tfrecords \
--record_info_dir=${DATA_ROOT}/tfrecords/ \
--corpus_info_path=${DATA_ROOT}/corpus-info.json \
--model_dir=EXP-text8 \
--n_layer=${N_LAYER} \
--d_model=${D_MODEL} \
--d_embed=${D_EMBED} \
--n_head=${N_HEAD} \
--d_head=${D_HEAD} \
--d_inner=${D_INNER} \
--dropout=0.1 \
--dropatt=0.0 \
--learning_rate=0.00025 \
--warmup_steps=0 \
--train_steps=400000 \
--tgt_len=${TGT_LEN} \
--mem_len=${MEM_LEN} \
--train_batch_size=${BSZ} \
--num_core_per_host=${NUM_CORE} \
--iterations=200 \
--save_steps=200 \
--do_train=True \
--do_eval=False \
${@:2}
elif [[ $1 == 'eval' ]]; then
echo 'Run evaluation...'
python train_gpu.py \
--data_dir=${DATA_ROOT}/tfrecords \
--record_info_dir=${DATA_ROOT}/tfrecords/ \
--corpus_info_path=${DATA_ROOT}/corpus-info.json \
--model_dir=EXP-text8 \
--n_layer=${N_LAYER} \
--d_model=${D_MODEL} \
--d_embed=${D_EMBED} \
--n_head=${N_HEAD} \
--d_head=${D_HEAD} \
--d_inner=${D_INNER} \
--dropout=0.0 \
--dropatt=0.0 \
--tgt_len=${TEST_TGT_LEN} \
--mem_len=${TEST_MEM_LEN} \
--clamp_len=${TEST_CLAMP_LEN} \
--same_length=True \
--eval_batch_size=${TEST_BSZ} \
--num_core_per_host=${TEST_NUM_CORE} \
--do_train=False \
--do_eval=True \
--eval_split=test \
${@:2}
else
echo "unknown argument: $1"
fi
#!/bin/bash
# Path
LOCAL_DIR=../data/text8/
GSDATA=
GSEXP=
# TPU setting
NUM_HOST=2
NUM_CORE=16 # TPUv2 -> 8 | TPUv3 -> 16
TEST_NUM_HOST=1
TEST_NUM_CORE=8 # TPUv2 -> 8 | TPUv3 -> 16
# Model
N_LAYER=24
D_MODEL=1024
D_EMBED=1024
N_HEAD=8
D_HEAD=128
D_INNER=3072
# Training
TGT_LEN=768
MEM_LEN=768
TRAIN_BSZ=64
VALID_BSZ=64
# Testing
TEST_TGT_LEN=128
TEST_MEM_LEN=3800
TEST_CLAMP_LEN=1000
TEST_BSZ=16
if [[ $1 == 'train_data' ]]; then
python data_utils.py \
--data_dir=${LOCAL_DIR}/ \
--dataset=text8 \
--tgt_len=${TGT_LEN} \
--per_host_train_bsz=${TRAIN_BSZ} \
--per_host_valid_bsz=${VALID_BSZ} \
--num_core_per_host=${NUM_CORE} \
--num_passes=10 \
--use_tpu=True \
${@:2}
SRC_PATTERN=train.bsz-${TRAIN_BSZ}.tlen-${TGT_LEN}.core-${NUM_CORE}*
gsutil cp ${LOCAL_DIR}/tfrecords/${SRC_PATTERN} ${GSDATA}/text8-tfrecords/
SRC_PATTERN=valid.bsz-${VALID_BSZ}.tlen-${TGT_LEN}.core-${NUM_CORE}*
gsutil cp ${LOCAL_DIR}/tfrecords/${SRC_PATTERN} ${GSDATA}/text8-tfrecords/
elif [[ $1 == 'test_data' ]]; then
python data_utils.py \
--data_dir=${LOCAL_DIR}/ \
--dataset=text8 \
--tgt_len=${TEST_TGT_LEN} \
--per_host_test_bsz=${TEST_BSZ} \
--num_core_per_host=${TEST_NUM_CORE} \
--num_passes=1 \
--use_tpu=True \
${@:2}
SRC_PATTERN=test.bsz-${TEST_BSZ}.tlen-${TEST_TGT_LEN}.core-${TEST_NUM_CORE}*
gsutil cp ${LOCAL_DIR}/tfrecords/${SRC_PATTERN} ${GSDATA}/text8-tfrecords/
elif [[ $1 == 'train' ]]; then
echo 'Run training...'
python train.py \
--data_dir=${GSDATA}/text8-tfrecords \
--record_info_dir=${LOCAL_DIR}/tfrecords/ \
--corpus_info_path=${LOCAL_DIR}/corpus-info.json \
--model_dir=${GSEXP}/text8 \
--n_layer=${N_LAYER} \
--d_model=${D_MODEL} \
--d_embed=${D_EMBED} \
--n_head=${N_HEAD} \
--d_head=${D_HEAD} \
--d_inner=${D_INNER} \
--dropout=0.15 \
--dropatt=0.15 \
--learning_rate=0.00025 \
--warmup_steps=4000 \
--train_steps=400000 \
--tgt_len=${TGT_LEN} \
--mem_len=${MEM_LEN} \
--train_batch_size=${TRAIN_BSZ} \
--use_tpu=True \
--num_host=${NUM_HOST} \
--num_core_per_host=${NUM_CORE} \
--iterations=1000 \
--save_steps=10000 \
--do_train=True \
--do_eval=False \
${@:2}
elif [[ $1 == 'eval' ]]; then
echo 'Run evaluation...'
python train.py \
--data_dir=${GSDATA}/text8-tfrecords \
--record_info_dir=${LOCAL_DIR}/tfrecords/ \
--corpus_info_path=${LOCAL_DIR}/corpus-info.json \
--model_dir=${GSEXP}/text8 \
--n_layer=${N_LAYER} \
--d_model=${D_MODEL} \
--d_embed=${D_EMBED} \
--n_head=${N_HEAD} \
--d_head=${D_HEAD} \
--d_inner=${D_INNER} \
--tgt_len=${TEST_TGT_LEN} \
--mem_len=${TEST_MEM_LEN} \
--eval_batch_size=${TEST_BSZ} \
--num_host=${TEST_NUM_HOST} \
--num_core_per_host=${TEST_NUM_CORE} \
--use_tpu=True \
--do_train=False \
--do_eval_only=True \
--eval_split=test \
${@:2}
else
echo "unknown argument: $1"
fi
#!/bin/bash
# Data
DATA_ROOT=./
DATA_DIR=${DATA_ROOT}/pretrained_xl/tf_enwik8/data
MODEL_DIR=${DATA_ROOT}/pretrained_xl/tf_enwik8/model
# Model
N_LAYER=24
D_MODEL=1024
D_EMBED=1024
N_HEAD=8
D_HEAD=128
D_INNER=3072
# Testing
TEST_TGT_LEN=128
TEST_MEM_LEN=3800
TEST_CLAMP_LEN=1000
TEST_CKPT_PATH=${MODEL_DIR}/model.ckpt-0
TEST_BSZ=16
TEST_NUM_CORE=2
echo 'Preprocess test set...'
python data_utils.py \
--data_dir=${DATA_DIR}/ \
--dataset=enwik8 \
--tgt_len=${TEST_TGT_LEN} \
--per_host_test_bsz=${TEST_BSZ} \
--num_passes=1 \
--use_tpu=False
echo 'Run evaluation on test set...'
python train_gpu.py \
--data_dir=${DATA_DIR}/tfrecords \
--record_info_dir=${DATA_DIR}/tfrecords/ \
--corpus_info_path=${DATA_DIR}/corpus-info.json \
--eval_ckpt_path=${TEST_CKPT_PATH} \
--model_dir=EXP-enwik8 \
--n_layer=${N_LAYER} \
--d_model=${D_MODEL} \
--d_embed=${D_EMBED} \
--n_head=${N_HEAD} \
--d_head=${D_HEAD} \
--d_inner=${D_INNER} \
--dropout=0.0 \
--dropatt=0.0 \
--tgt_len=${TEST_TGT_LEN} \
--mem_len=${TEST_MEM_LEN} \
--clamp_len=${TEST_CLAMP_LEN} \
--same_length=True \
--eval_batch_size=${TEST_BSZ} \
--num_core_per_host=${TEST_NUM_CORE} \
--do_train=False \
--do_eval=True \
--eval_split=test