Commit cb8dde1c authored by hepj's avatar hepj
Browse files

增加transformer-xl模型代码

parent a22e7ca7
# rocblas-bench invocations replaying the exact GEMM workloads issued by the
# Transformer-XL TF1 training run (captured via ROCBLAS_LAYER logging).
# All calls are single-precision real (f32_r); plain `gemm` entries cover the
# feed-forward / projection matmuls, `gemm_strided_batched` entries cover the
# per-head attention matmuls. `--atomics_not_allowed` forces deterministic
# (non-atomic) kernels.
./rocblas-bench -f gemm -r f32_r --transposeA N --transposeB N -m 1536 -n 11532 -k 512 --alpha 1 --lda 1536 --ldb 512 --beta 0 --ldc 1536 --atomics_not_allowed
./rocblas-bench -f gemm -r f32_r --transposeA N --transposeB N -m 1536 -n 12288 -k 512 --alpha 1 --lda 1536 --ldb 512 --beta 0 --ldc 1536 --atomics_not_allowed
./rocblas-bench -f gemm -r f32_r --transposeA N --transposeB N -m 2048 -n 5388 -k 512 --alpha 1 --lda 2048 --ldb 512 --beta 0 --ldc 2048 --atomics_not_allowed
./rocblas-bench -f gemm -r f32_r --transposeA N --transposeB N -m 2048 -n 6144 -k 512 --alpha 1 --lda 2048 --ldb 512 --beta 0 --ldc 2048 --atomics_not_allowed
./rocblas-bench -f gemm -r f32_r --transposeA N --transposeB N -m 512 -n 1024 -k 512 --alpha 1 --lda 512 --ldb 512 --beta 0 --ldc 512 --atomics_not_allowed
./rocblas-bench -f gemm -r f32_r --transposeA N --transposeB N -m 512 -n 5388 -k 2048 --alpha 1 --lda 512 --ldb 2048 --beta 0 --ldc 512 --atomics_not_allowed
./rocblas-bench -f gemm -r f32_r --transposeA N --transposeB N -m 512 -n 5388 -k 204 --alpha 1 --lda 512 --ldb 204 --beta 0 --ldc 512 --atomics_not_allowed
./rocblas-bench -f gemm -r f32_r --transposeA N --transposeB N -m 512 -n 5388 -k 512 --alpha 1 --lda 512 --ldb 512 --beta 0 --ldc 512 --atomics_not_allowed
./rocblas-bench -f gemm -r f32_r --transposeA N --transposeB N -m 512 -n 6144 -k 2048 --alpha 1 --lda 512 --ldb 2048 --beta 0 --ldc 512 --atomics_not_allowed
./rocblas-bench -f gemm -r f32_r --transposeA N --transposeB N -m 512 -n 6144 -k 204 --alpha 1 --lda 512 --ldb 204 --beta 0 --ldc 512 --atomics_not_allowed
./rocblas-bench -f gemm -r f32_r --transposeA N --transposeB N -m 512 -n 6144 -k 512 --alpha 1 --lda 512 --ldb 512 --beta 0 --ldc 512 --atomics_not_allowed
./rocblas-bench -f gemm -r f32_r --transposeA N --transposeB N -m 512 -n 961 -k 512 --alpha 1 --lda 512 --ldb 512 --beta 0 --ldc 512 --atomics_not_allowed
./rocblas-bench -f gemm -r f32_r --transposeA N --transposeB T -m 1536 -n 512 -k 11532 --alpha 1 --lda 1536 --ldb 512 --beta 0 --ldc 1536 --atomics_not_allowed
./rocblas-bench -f gemm -r f32_r --transposeA N --transposeB T -m 1536 -n 512 -k 12288 --alpha 1 --lda 1536 --ldb 512 --beta 0 --ldc 1536 --atomics_not_allowed
./rocblas-bench -f gemm -r f32_r --transposeA N --transposeB T -m 2048 -n 512 -k 5388 --alpha 1 --lda 2048 --ldb 512 --beta 0 --ldc 2048 --atomics_not_allowed
./rocblas-bench -f gemm -r f32_r --transposeA N --transposeB T -m 2048 -n 512 -k 6144 --alpha 1 --lda 2048 --ldb 512 --beta 0 --ldc 2048 --atomics_not_allowed
./rocblas-bench -f gemm -r f32_r --transposeA N --transposeB T -m 204 -n 512 -k 5388 --alpha 1 --lda 204 --ldb 512 --beta 0 --ldc 204 --atomics_not_allowed
./rocblas-bench -f gemm -r f32_r --transposeA N --transposeB T -m 204 -n 512 -k 6144 --alpha 1 --lda 204 --ldb 512 --beta 0 --ldc 204 --atomics_not_allowed
./rocblas-bench -f gemm -r f32_r --transposeA N --transposeB T -m 512 -n 2048 -k 5388 --alpha 1 --lda 512 --ldb 2048 --beta 0 --ldc 512 --atomics_not_allowed
./rocblas-bench -f gemm -r f32_r --transposeA N --transposeB T -m 512 -n 2048 -k 6144 --alpha 1 --lda 512 --ldb 2048 --beta 0 --ldc 512 --atomics_not_allowed
./rocblas-bench -f gemm -r f32_r --transposeA N --transposeB T -m 512 -n 512 -k 1024 --alpha 1 --lda 512 --ldb 512 --beta 0 --ldc 512 --atomics_not_allowed
./rocblas-bench -f gemm -r f32_r --transposeA N --transposeB T -m 512 -n 512 -k 5388 --alpha 1 --lda 512 --ldb 512 --beta 0 --ldc 512 --atomics_not_allowed
./rocblas-bench -f gemm -r f32_r --transposeA N --transposeB T -m 512 -n 512 -k 6144 --alpha 1 --lda 512 --ldb 512 --beta 0 --ldc 512 --atomics_not_allowed
./rocblas-bench -f gemm -r f32_r --transposeA N --transposeB T -m 512 -n 512 -k 961 --alpha 1 --lda 512 --ldb 512 --beta 0 --ldc 512 --atomics_not_allowed
./rocblas-bench -f gemm -r f32_r --transposeA T --transposeB N -m 2048 -n 5388 -k 512 --alpha 1 --lda 512 --ldb 512 --beta 0 --ldc 2048 --atomics_not_allowed
./rocblas-bench -f gemm -r f32_r --transposeA T --transposeB N -m 2048 -n 6144 -k 512 --alpha 1 --lda 512 --ldb 512 --beta 0 --ldc 2048 --atomics_not_allowed
./rocblas-bench -f gemm -r f32_r --transposeA T --transposeB N -m 204 -n 5388 -k 512 --alpha 1 --lda 512 --ldb 512 --beta 0 --ldc 204 --atomics_not_allowed
./rocblas-bench -f gemm -r f32_r --transposeA T --transposeB N -m 204 -n 6144 -k 512 --alpha 1 --lda 512 --ldb 512 --beta 0 --ldc 204 --atomics_not_allowed
./rocblas-bench -f gemm -r f32_r --transposeA T --transposeB N -m 512 -n 11532 -k 1536 --alpha 1 --lda 1536 --ldb 1536 --beta 0 --ldc 512 --atomics_not_allowed
./rocblas-bench -f gemm -r f32_r --transposeA T --transposeB N -m 512 -n 12288 -k 1536 --alpha 1 --lda 1536 --ldb 1536 --beta 0 --ldc 512 --atomics_not_allowed
./rocblas-bench -f gemm -r f32_r --transposeA T --transposeB N -m 512 -n 5388 -k 2048 --alpha 1 --lda 2048 --ldb 2048 --beta 0 --ldc 512 --atomics_not_allowed
./rocblas-bench -f gemm -r f32_r --transposeA T --transposeB N -m 512 -n 5388 -k 512 --alpha 1 --lda 512 --ldb 512 --beta 0 --ldc 512 --atomics_not_allowed
./rocblas-bench -f gemm -r f32_r --transposeA T --transposeB N -m 512 -n 6144 -k 2048 --alpha 1 --lda 2048 --ldb 2048 --beta 0 --ldc 512 --atomics_not_allowed
./rocblas-bench -f gemm -r f32_r --transposeA T --transposeB N -m 512 -n 6144 -k 512 --alpha 1 --lda 512 --ldb 512 --beta 0 --ldc 512 --atomics_not_allowed
./rocblas-bench -f gemm_strided_batched -r f32_r --transposeA N --transposeB N -m 1024 -n 512 -k 64 --alpha 1 --lda 1024 --stride_a 65536 --ldb 64 --stride_b 32768 --beta 0 --ldc 1024 --stride_c 524288 --batch_count 96 --atomics_not_allowed
./rocblas-bench -f gemm_strided_batched -r f32_r --transposeA N --transposeB N -m 1024 -n 6144 -k 64 --alpha 1 --lda 1024 --stride_a 65536 --ldb 64 --stride_b 393216 --beta 0 --ldc 1024 --stride_c 6291456 --batch_count 8 --atomics_not_allowed
./rocblas-bench -f gemm_strided_batched -r f32_r --transposeA N --transposeB N -m 64 -n 449 -k 961 --alpha 1 --lda 64 --stride_a 61504 --ldb 961 --stride_b 431489 --beta 0 --ldc 64 --stride_c 28736 --batch_count 96 --atomics_not_allowed
./rocblas-bench -f gemm_strided_batched -r f32_r --transposeA N --transposeB N -m 64 -n 512 -k 1024 --alpha 1 --lda 64 --stride_a 65536 --ldb 1024 --stride_b 524288 --beta 0 --ldc 64 --stride_c 32768 --batch_count 96 --atomics_not_allowed
./rocblas-bench -f gemm_strided_batched -r f32_r --transposeA N --transposeB N -m 961 -n 449 -k 64 --alpha 1 --lda 961 --stride_a 61504 --ldb 64 --stride_b 28736 --beta 0 --ldc 961 --stride_c 431489 --batch_count 96 --atomics_not_allowed
./rocblas-bench -f gemm_strided_batched -r f32_r --transposeA N --transposeB N -m 961 -n 5388 -k 64 --alpha 1 --lda 961 --stride_a 61504 --ldb 64 --stride_b 344832 --beta 0 --ldc 961 --stride_c 5177868 --batch_count 8 --atomics_not_allowed
./rocblas-bench -f gemm_strided_batched -r f32_r --transposeA N --transposeB T -m 1024 -n 64 -k 512 --alpha 1 --lda 1024 --stride_a 524288 --ldb 64 --stride_b 32768 --beta 0 --ldc 1024 --stride_c 65536 --batch_count 96 --atomics_not_allowed
./rocblas-bench -f gemm_strided_batched -r f32_r --transposeA N --transposeB T -m 1024 -n 64 -k 6144 --alpha 1 --lda 1024 --stride_a 6291456 --ldb 64 --stride_b 393216 --beta 0 --ldc 1024 --stride_c 65536 --batch_count 8 --atomics_not_allowed
./rocblas-bench -f gemm_strided_batched -r f32_r --transposeA N --transposeB T -m 64 -n 1024 -k 512 --alpha 1 --lda 64 --stride_a 32768 --ldb 1024 --stride_b 524288 --beta 0 --ldc 64 --stride_c 65536 --batch_count 96 --atomics_not_allowed
./rocblas-bench -f gemm_strided_batched -r f32_r --transposeA N --transposeB T -m 64 -n 961 -k 449 --alpha 1 --lda 64 --stride_a 28736 --ldb 961 --stride_b 431489 --beta 0 --ldc 64 --stride_c 61504 --batch_count 96 --atomics_not_allowed
./rocblas-bench -f gemm_strided_batched -r f32_r --transposeA N --transposeB T -m 961 -n 64 -k 449 --alpha 1 --lda 961 --stride_a 431489 --ldb 64 --stride_b 28736 --beta 0 --ldc 961 --stride_c 61504 --batch_count 96 --atomics_not_allowed
./rocblas-bench -f gemm_strided_batched -r f32_r --transposeA N --transposeB T -m 961 -n 64 -k 5388 --alpha 1 --lda 961 --stride_a 5177868 --ldb 64 --stride_b 344832 --beta 0 --ldc 961 --stride_c 61504 --batch_count 8 --atomics_not_allowed
./rocblas-bench -f gemm_strided_batched -r f32_r --transposeA T --transposeB N -m 1024 -n 512 -k 64 --alpha 1 --lda 64 --stride_a 65536 --ldb 64 --stride_b 32768 --beta 0 --ldc 1024 --stride_c 524288 --batch_count 96 --atomics_not_allowed
./rocblas-bench -f gemm_strided_batched -r f32_r --transposeA T --transposeB N -m 64 -n 449 -k 961 --alpha 1 --lda 961 --stride_a 61504 --ldb 961 --stride_b 431489 --beta 0 --ldc 64 --stride_c 28736 --batch_count 96 --atomics_not_allowed
./rocblas-bench -f gemm_strided_batched -r f32_r --transposeA T --transposeB N -m 64 -n 512 -k 1024 --alpha 1 --lda 1024 --stride_a 65536 --ldb 1024 --stride_b 524288 --beta 0 --ldc 64 --stride_c 32768 --batch_count 96 --atomics_not_allowed
./rocblas-bench -f gemm_strided_batched -r f32_r --transposeA T --transposeB N -m 64 -n 5388 -k 961 --alpha 1 --lda 961 --stride_a 61504 --ldb 961 --stride_b 5177868 --beta 0 --ldc 64 --stride_c 344832 --batch_count 8 --atomics_not_allowed
./rocblas-bench -f gemm_strided_batched -r f32_r --transposeA T --transposeB N -m 64 -n 6144 -k 1024 --alpha 1 --lda 1024 --stride_a 65536 --ldb 1024 --stride_b 6291456 --beta 0 --ldc 64 --stride_c 393216 --batch_count 8 --atomics_not_allowed
./rocblas-bench -f gemm_strided_batched -r f32_r --transposeA T --transposeB N -m 961 -n 449 -k 64 --alpha 1 --lda 64 --stride_a 61504 --ldb 64 --stride_b 28736 --beta 0 --ldc 961 --stride_c 431489 --batch_count 96 --atomics_not_allowed
This diff is collapsed.
This diff is collapsed.
#!/usr/bin/env python
# coding=utf-8
"""Split the enwik8 corpus into train/valid/test files for Transformer-XL.

Reads ``enwik8.zip`` from the current directory and writes each split twice:
as a space-separated byte-id text file (``*.txt``) and as raw bytes
(``*.txt.raw``). Standard enwik8 protocol: last 10M bytes are held out,
5M for validation and 5M for test.
"""
import os
import sys
import zipfile

if os.path.exists('train.txt'):
    print('Tokenized enwik8 already exists - skipping processing')
    sys.exit()

data = zipfile.ZipFile('enwik8.zip').read('enwik8')
print('Length of enwik8: {}'.format(len(data)))

num_test_chars = 5000000

train_data = data[: -2 * num_test_chars]
valid_data = data[-2 * num_test_chars: -num_test_chars]
test_data = data[-num_test_chars:]

for fn, part in [('train.txt', train_data), ('valid.txt', valid_data),
                 ('test.txt', test_data)]:
    print('{} will have {} bytes'.format(fn, len(part)))
    print('- Tokenizing...')
    # Each byte becomes its decimal id; newline bytes are kept verbatim so
    # line structure survives in the tokenized file.
    part_str = ' '.join([str(c) if c != ord('\n') else '\n' for c in part])
    print('- Writing...')
    # Use context managers so the files are flushed and closed; the original
    # leaked the open file objects (open(...).write(...)).
    with open(fn, 'w') as f:
        f.write(part_str)
    with open(fn + '.raw', 'wb') as f:
        f.write(part)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import math
import time
from absl import flags
import absl.logging as _logging # pylint: disable=unused-import
import tensorflow as tf
import model
import data_utils
from gpu_utils import assign_to_gpu, average_grads_and_vars
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.python.profiler import model_analyzer
from tensorflow.python.profiler import option_builder
import os
os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '1'
tf.logging.set_verbosity(tf.logging.INFO)
# GPU config
flags.DEFINE_integer("num_hosts", default=1,
help="Number of TPU hosts")
flags.DEFINE_integer("num_core_per_host", default=8,
help="Number of cores per host")
# Experiment (data/checkpoint/directory) config
flags.DEFINE_string("data_dir", default="",
help="Path to tf-records directory.")
flags.DEFINE_string("record_info_dir", default="",
help="Path to local directory containing filenames.txt.")
flags.DEFINE_string("corpus_info_path", default="",
help="Path to corpus-info.json file.")
flags.DEFINE_string("model_dir", default=None,
help="Estimator model_dir.")
flags.DEFINE_bool("do_train", default=True,
help="Whether to run training.")
flags.DEFINE_bool("do_eval", default=False,
help="Whether to run eval on the dev set.")
flags.DEFINE_string("eval_ckpt_path", None,
help="Checkpoint path for do_test evaluation."
"If set, model_dir will be ignored."
"If unset, will use the latest ckpt in model_dir.")
flags.DEFINE_string("warm_start_path", None,
help="Checkpoint path for warm start."
"If set, will clear Adam states."
"Note that the new model_dir should be different"
" from warm_start_path.")
# Optimization config
flags.DEFINE_float("learning_rate", default=2.5e-4,
help="Maximum learning rate.")
flags.DEFINE_float("clip", default=0.25,
help="Gradient clipping value.")
# for cosine decay
flags.DEFINE_float("min_lr_ratio", default=0.004,
help="Minimum ratio learning rate.")
flags.DEFINE_integer("warmup_steps", default=0,
help="Number of steps for linear lr warmup.")
# Training config
flags.DEFINE_integer("train_batch_size", default=60,
help="Size of train batch.")
flags.DEFINE_integer("eval_batch_size", default=60,
help="Size of valid batch.")
flags.DEFINE_integer("train_steps", default=100000,
help="Total number of training steps.")
flags.DEFINE_integer("iterations", default=500,
help="Number of iterations per repeat loop.")
flags.DEFINE_integer("save_steps", default=10000,
help="number of steps for model checkpointing.")
# Evaluation config
flags.DEFINE_bool("do_test", default=False,
help="Run on the test set.")
flags.DEFINE_integer("max_eval_batch", default=-1,
help="Set -1 to turn off. Only used in test mode.")
flags.DEFINE_bool("do_eval_only", default=False,
help="Run evaluation only.")
flags.DEFINE_integer("start_eval_steps", default=10000,
help="Which checkpoint to start with in `do_eval_only` mode.")
flags.DEFINE_string("eval_split", "valid",
help="Which data split to evaluate.")
# Model config
flags.DEFINE_integer("tgt_len", default=70,
help="Number of steps to predict")
flags.DEFINE_integer("mem_len", default=70,
help="Number of steps to cache")
flags.DEFINE_bool("same_length", default=False,
help="Same length attention")
flags.DEFINE_integer("clamp_len", default=-1,
help="Clamp length")
flags.DEFINE_integer("n_layer", default=6,
help="Number of layers.")
flags.DEFINE_integer("d_model", default=500,
help="Dimension of the model.")
flags.DEFINE_integer("d_embed", default=500,
help="Dimension of the embeddings.")
flags.DEFINE_integer("n_head", default=10,
help="Number of attention heads.")
flags.DEFINE_integer("d_head", default=50,
help="Dimension of each attention head.")
flags.DEFINE_integer("d_inner", default=1000,
help="Dimension of inner hidden size in positionwise feed-forward.")
flags.DEFINE_float("dropout", default=0.1,
help="Dropout rate.")
flags.DEFINE_float("dropatt", default=0.1,
help="Attention dropout rate.")
flags.DEFINE_bool("untie_r", default=False,
help="untie r_w_bias and r_r_bias")
# Adaptive Softmax / Embedding
flags.DEFINE_bool("tie_weight", default=True,
help="Tie embedding and softmax weight.")
flags.DEFINE_integer("div_val", default=1,
help="Divide the embedding size by this val for each bin")
flags.DEFINE_bool("proj_share_all_but_first", default=False,
help="True to share all but first projs, False not to share.")
flags.DEFINE_bool("proj_same_dim", default=True,
help="Project the bin with the same dimension.")
# Parameter initialization
flags.DEFINE_enum("init", default="normal",
enum_values=["normal", "uniform"],
help="Initialization method.")
flags.DEFINE_float("init_std", default=0.02,
help="Initialization std when init is normal.")
flags.DEFINE_float("proj_init_std", default=0.01,
help="Initialization std for embedding projection.")
flags.DEFINE_float("init_range", default=0.1,
help="Initialization std when init is uniform.")
FLAGS = flags.FLAGS
def get_model_fn(n_token, cutoffs):
  """Return a model_fn closure over the vocab size and adaptive-softmax cutoffs.

  Args:
    n_token: int, vocabulary size.
    cutoffs: list of int, adaptive-softmax cutoffs (may be empty).

  Returns:
    model_fn(inp, tgt, mems, is_training) building one tower of the graph.
    In training mode it returns (loss, new_mems, grads_and_vars); otherwise
    (loss, new_mems).
  """
  def model_fn(inp, tgt, mems, is_training):
    # Transpose batch-major [bsz, tgt_len] inputs to the time-major
    # [tgt_len, bsz] layout expected by model.transformer.
    inp = tf.transpose(inp, [1, 0])
    tgt = tf.transpose(tgt, [1, 0])

    if FLAGS.init == "uniform":
      initializer = tf.initializers.random_uniform(
          minval=-FLAGS.init_range,
          maxval=FLAGS.init_range,
          seed=None)
    elif FLAGS.init == "normal":
      initializer = tf.initializers.random_normal(
          stddev=FLAGS.init_std,
          seed=None)
      proj_initializer = tf.initializers.random_normal(
          stddev=FLAGS.proj_init_std,
          seed=None)

    # Optionally share all embedding projections except the first bin.
    tie_projs = [False for _ in range(len(cutoffs) + 1)]
    if FLAGS.proj_share_all_but_first:
      for i in range(1, len(tie_projs)):
        tie_projs[i] = True

    loss, new_mems = model.transformer(
        dec_inp=inp,
        target=tgt,
        mems=mems,
        n_token=n_token,
        n_layer=FLAGS.n_layer,
        d_model=FLAGS.d_model,
        d_embed=FLAGS.d_embed,
        n_head=FLAGS.n_head,
        d_head=FLAGS.d_head,
        d_inner=FLAGS.d_inner,
        dropout=FLAGS.dropout,
        dropatt=FLAGS.dropatt,
        initializer=initializer,
        proj_initializer=proj_initializer,
        is_training=is_training,
        mem_len=FLAGS.mem_len,
        cutoffs=cutoffs,
        div_val=FLAGS.div_val,
        tie_projs=tie_projs,
        input_perms=None,
        target_perms=None,
        head_target=None,
        same_length=FLAGS.same_length,
        clamp_len=FLAGS.clamp_len,
        use_tpu=False,
        untie_r=FLAGS.untie_r,
        proj_same_dim=FLAGS.proj_same_dim)

    # number of parameters
    num_params = sum([np.prod(v.shape) for v in tf.trainable_variables()])
    tf.logging.info('#params: {}'.format(num_params))

    # format_str = '{{:<{0}s}}\t{{}}'.format(
    #     max([len(v.name) for v in tf.trainable_variables()]))
    # for v in tf.trainable_variables():
    #   tf.logging.info(format_str.format(v.name, v.get_shape()))

    if is_training:
      all_vars = tf.trainable_variables()
      grads = tf.gradients(loss, all_vars)
      grads_and_vars = list(zip(grads, all_vars))

      return loss, new_mems, grads_and_vars
    else:
      return loss, new_mems

  return model_fn
def single_core_graph(n_token, cutoffs, is_training, inp, tgt, mems):
  """Build one tower (single device) of the Transformer-XL graph.

  Thin wrapper combining get_model_fn with concrete tensors; returns
  whatever model_fn returns for the given mode.
  """
  model_fn = get_model_fn(
      n_token=n_token,
      cutoffs=cutoffs)

  model_ret = model_fn(
      inp=inp,
      tgt=tgt,
      mems=mems,
      is_training=is_training)

  return model_ret
def train(n_token, cutoffs, ps_device):
  """Build the multi-tower training graph and run the training loop.

  Also profiles every step (FULL_TRACE run metadata) and dumps an op-level
  timing report to ./prof.txt and a timeline to ./prof.json when done.

  Args:
    n_token: int, vocabulary size.
    cutoffs: adaptive-softmax cutoffs.
    ps_device: device hosting the shared parameters, e.g. "/gpu:0".
  """
  ##### Get input function and model function
  tf.logging.set_verbosity(tf.logging.INFO)
  train_input_fn, train_record_info = data_utils.get_input_fn(
      record_info_dir=FLAGS.record_info_dir,
      split="train",
      per_host_bsz=FLAGS.train_batch_size,
      tgt_len=FLAGS.tgt_len,
      num_core_per_host=FLAGS.num_core_per_host,
      num_hosts=1,
      use_tpu=False)

  tf.logging.info("num of batches {}".format(train_record_info["num_batch"]))

  ##### Create computational graph
  train_set = train_input_fn({
      "batch_size": FLAGS.train_batch_size,
      "data_dir": FLAGS.data_dir})

  input_feed, label_feed = train_set.make_one_shot_iterator().get_next()

  # Split the host batch evenly across towers (one tower per core).
  inputs = tf.split(input_feed, FLAGS.num_core_per_host, 0)
  labels = tf.split(label_feed, FLAGS.num_core_per_host, 0)

  per_core_bsz = FLAGS.train_batch_size // FLAGS.num_core_per_host

  tower_mems, tower_losses, tower_new_mems, tower_grads_and_vars = [], [], [], []

  for i in range(FLAGS.num_core_per_host):
    # First tower creates the variables; subsequent towers reuse them.
    reuse = True if i > 0 else None
    with tf.device(assign_to_gpu(i, ps_device)), \
        tf.variable_scope(tf.get_variable_scope(), reuse=reuse):

      # Transformer-XL recurrence memory placeholders, one per layer.
      mems_i = [tf.placeholder(tf.float32,
                               [FLAGS.mem_len, per_core_bsz, FLAGS.d_model])
                for _ in range(FLAGS.n_layer)]

      loss_i, new_mems_i, grads_and_vars_i = single_core_graph(
          n_token=n_token,
          cutoffs=cutoffs,
          is_training=True,
          inp=inputs[i],
          tgt=labels[i],
          mems=mems_i)

      tower_mems.append(mems_i)
      tower_losses.append(loss_i)
      tower_new_mems.append(new_mems_i)
      tower_grads_and_vars.append(grads_and_vars_i)

  ## average losses and gradients across towers
  if len(tower_losses) > 1:
    loss = tf.add_n(tower_losses) / len(tower_losses)
    grads_and_vars = average_grads_and_vars(tower_grads_and_vars)
  else:
    loss = tower_losses[0]
    grads_and_vars = tower_grads_and_vars[0]
  grads, all_vars = zip(*grads_and_vars)

  ## clip gradient
  clipped, gnorm = tf.clip_by_global_norm(grads, FLAGS.clip)
  grads_and_vars = list(zip(clipped, all_vars))

  ## configure the optimizer
  global_step = tf.train.get_or_create_global_step()

  # warmup stage: increase the learning rate linearly
  if FLAGS.warmup_steps > 0:
    warmup_lr = tf.to_float(global_step) / tf.to_float(FLAGS.warmup_steps) \
        * FLAGS.learning_rate
  else:
    warmup_lr = 0.0

  # decay stage: decay the learning rate using the cosine schedule
  decay_lr = tf.train.cosine_decay(
      FLAGS.learning_rate,
      global_step=global_step - FLAGS.warmup_steps,
      decay_steps=FLAGS.train_steps - FLAGS.warmup_steps,
      alpha=FLAGS.min_lr_ratio)

  # choose warmup or decay
  learning_rate = tf.where(global_step < FLAGS.warmup_steps,
                           warmup_lr, decay_lr)

  # get the train op
  optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
  train_op = optimizer.apply_gradients(grads_and_vars, global_step)

  ##### Training loop
  # Zero-filled initial memories, fed back each step with the fetched values.
  tower_mems_np = [
      [np.zeros([FLAGS.mem_len, per_core_bsz, FLAGS.d_model], dtype=np.float32)
       for layer in range(FLAGS.n_layer)]
      for core in range(FLAGS.num_core_per_host)
  ]

  saver = tf.train.Saver()

  with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
    # (profiling additions) collect per-step FULL_TRACE metadata.
    profiler = model_analyzer.Profiler(graph=sess.graph)
    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()

    sess.run(tf.global_variables_initializer())

    if FLAGS.warm_start_path is not None:
      tf.logging.info("warm start from {}".format(FLAGS.warm_start_path))
      saver.restore(sess, FLAGS.warm_start_path)

    fetches = [loss, tower_new_mems, global_step, gnorm, learning_rate, train_op]

    total_loss, prev_step = 0., -1
    # Defined before the loop so the timer exists even if the first observed
    # global step is already > 0 (e.g. after a warm start); the original
    # code raised NameError in that case.
    start_time = time.time()
    while True:
      feed_dict = {}
      for i in range(FLAGS.num_core_per_host):
        for m, m_np in zip(tower_mems[i], tower_mems_np[i]):
          feed_dict[m] = m_np

      fetched = sess.run(fetches, feed_dict=feed_dict, options=run_options,
                         run_metadata=run_metadata)

      loss_np, tower_mems_np, curr_step = fetched[:3]
      total_loss += loss_np

      # (profiling addition) fold this step's trace into the profiler.
      profiler.add_step(step=curr_step, run_meta=run_metadata)

      # (throughput logging addition)
      if curr_step == 0:
        # Reset so graph warm-up is not counted in the first measurement.
        start_time = time.time()
      if curr_step > 0:
        end_time = time.time()
        global_step_s = 1 / (end_time - start_time)
        start_time = end_time
        tf.logging.info("global_step/sec: {:.6f} , step= {}".format(
            global_step_s, curr_step))
        # tf.logging.info("examples/sec : {}".format(global_step_s * FLAGS.train_batch_size))

      if curr_step > 0 and curr_step % FLAGS.iterations == 0:
        curr_loss = total_loss / (curr_step - prev_step)
        tf.logging.info("[{}] | gnorm {:.2f} lr {:8.6f} "
                        "| loss {:.2f} | pplx {:>7.2f}, bpc {:>7.4f}".format(
                            curr_step, fetched[-3], fetched[-2],
                            curr_loss, math.exp(curr_loss),
                            curr_loss / math.log(2)))
        total_loss, prev_step = 0., curr_step

      if curr_step > 0 and curr_step % FLAGS.save_steps == 0:
        save_path = os.path.join(FLAGS.model_dir, "model.ckpt")
        saver.save(sess, save_path)
        tf.logging.info("Model saved in path: {}".format(save_path))

      if curr_step == FLAGS.train_steps:
        break

    # (profiling additions) build and emit the op-level report.
    profile_op_opt_builder = option_builder.ProfileOptionBuilder()
    profile_op_opt_builder.select(['micros', 'occurrence'])
    profile_op_opt_builder.order_by('occurrence')
    profile_op_opt_builder.with_max_depth(10)
    profile_op_opt_builder.with_step(5)
    # write the text report to a file
    profile_op_opt_builder.with_file_output("./prof.txt")
    # also emit a timeline trace
    profile_op_opt_builder.with_timeline_output("./prof.json")
    # display as op view
    profiler.profile_operations(profile_op_opt_builder.build())
def evaluate(n_token, cutoffs, ps_device):
  """Build the evaluation graph and report average loss / pplx / bpc.

  Args:
    n_token: int, vocabulary size.
    cutoffs: adaptive-softmax cutoffs.
    ps_device: device hosting the shared parameters, e.g. "/gpu:0".
  """
  ##### Get input function and model function
  eval_input_fn, eval_record_info = data_utils.get_input_fn(
      record_info_dir=FLAGS.record_info_dir,
      split=FLAGS.eval_split,
      per_host_bsz=FLAGS.eval_batch_size,
      tgt_len=FLAGS.tgt_len,
      num_core_per_host=FLAGS.num_core_per_host,
      num_hosts=1,
      use_tpu=False)

  num_batch = eval_record_info["num_batch"]
  if FLAGS.max_eval_batch > 0:
    num_batch = FLAGS.max_eval_batch
  tf.logging.info("num of batches {}".format(num_batch))

  ##### Create computational graph
  eval_set = eval_input_fn({
      "batch_size": FLAGS.eval_batch_size,
      "data_dir": FLAGS.data_dir})

  input_feed, label_feed = eval_set.make_one_shot_iterator().get_next()

  inputs = tf.split(input_feed, FLAGS.num_core_per_host, 0)
  labels = tf.split(label_feed, FLAGS.num_core_per_host, 0)

  per_core_bsz = FLAGS.eval_batch_size // FLAGS.num_core_per_host

  tower_mems, tower_losses, tower_new_mems = [], [], []

  for i in range(FLAGS.num_core_per_host):
    with tf.device(assign_to_gpu(i, ps_device)), \
        tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):

      mems_i = [tf.placeholder(tf.float32,
                               [FLAGS.mem_len, per_core_bsz, FLAGS.d_model])
                for _ in range(FLAGS.n_layer)]

      loss_i, new_mems_i = single_core_graph(
          n_token=n_token,
          cutoffs=cutoffs,
          is_training=False,
          inp=inputs[i],
          tgt=labels[i],
          mems=mems_i)

      tower_mems.append(mems_i)
      tower_losses.append(loss_i)
      tower_new_mems.append(new_mems_i)

  ## sum losses across towers
  if len(tower_losses) > 1:
    loss = tf.add_n(tower_losses) / len(tower_losses)
  else:
    loss = tower_losses[0]

  ##### Evaluation loop
  tower_mems_np = [
      [np.zeros([FLAGS.mem_len, per_core_bsz, FLAGS.d_model], dtype=np.float32)
       for layer in range(FLAGS.n_layer)]
      for core in range(FLAGS.num_core_per_host)
  ]

  saver = tf.train.Saver()

  with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
    sess.run(tf.global_variables_initializer())

    if FLAGS.eval_ckpt_path is None:
      eval_ckpt_path = tf.train.latest_checkpoint(FLAGS.model_dir)
    else:
      eval_ckpt_path = FLAGS.eval_ckpt_path
    tf.logging.info("Evaluate {}".format(eval_ckpt_path))
    saver.restore(sess, eval_ckpt_path)

    fetches = [loss, tower_new_mems, tf.size(label_feed)]

    format_str = " >> processing batch {{:{0}d}}/{{:{0}d}} ..".format(
        len(str(num_batch)))

    total_loss, total_cnt = 0, 0
    # Log roughly 10 progress lines; max(..., 1) avoids a ZeroDivisionError
    # when there are fewer than 10 batches (the original divided by 0).
    log_every = max(num_batch // 10, 1)
    for step in range(num_batch):
      if step % log_every == 0:
        tf.logging.info(format_str.format(step, num_batch))

      feed_dict = {}
      for i in range(FLAGS.num_core_per_host):
        for m, m_np in zip(tower_mems[i], tower_mems_np[i]):
          feed_dict[m] = m_np

      fetched = sess.run(fetches, feed_dict=feed_dict)

      loss_np, tower_mems_np, cnt_np = fetched[:3]
      # Weight each batch's loss by its token count.
      total_loss += loss_np * cnt_np
      total_cnt += cnt_np

    avg_loss = total_loss / total_cnt
    tf.logging.info("| loss {:.2f} | pplx {:>7.2f}, bpc {:>7.4f}".format(
        avg_loss, math.exp(avg_loss), avg_loss / math.log(2)))
def main(unused_argv):
  """Entry point: load corpus info, then run training and/or evaluation."""
  del unused_argv  # Unused

  tf.logging.set_verbosity(tf.logging.INFO)

  # Get corpus info
  corpus_info = data_utils.get_corpus_info(FLAGS.corpus_info_path)
  n_token = corpus_info["vocab_size"]
  # Drop the implicit 0 / vocab_size sentinels; keep interior cutoffs only.
  cutoffs = corpus_info["cutoffs"][1:-1]
  tf.logging.info("n_token {}".format(n_token))

  if FLAGS.do_train:
    tf.logging.set_verbosity(tf.logging.INFO)
    train(n_token, cutoffs, "/gpu:0")
  if FLAGS.do_eval:
    evaluate(n_token, cutoffs, "/gpu:0")


if __name__ == "__main__":
  tf.app.run()
#!/bin/bash
# Launcher for Transformer-XL on enwik8 (single-GPU test configuration).
# Usage: run.sh {train_data|test_data|train|eval} [extra flags...]
#
# NOTE: the environment setup below is intentionally disabled; uncomment the
# lines you need. (The original wrapped them in a Python-style ''' ... '''
# block, which is not a comment in bash — it was executed as one giant
# quoted word and failed with "command not found".)
#export HSA_FORCE_FINE_GRAIN_PCIE=1
#export MIOPEN_FIND_MODE=3
#export MIOPEN_ENABLE_LOGGING_CMD=1
#export ROCBLAS_LAYER=3
#module unload compiler/rocm/2.9
#echo "MIOPEN_FIND_MODE=$MIOPEN_FIND_MODE"
#lrank=$OMPI_COMM_WORLD_LOCAL_RANK
#comm_rank=$OMPI_COMM_WORLD_RANK
#comm_size=$OMPI_COMM_WORLD_SIZE
#NCCL_DEBUG=INFO

# Data
#DATA_ROOT=../data/enwik8/
DATA_ROOT=/work/home/hepj/tf1/transformer-xl-master/data/enwik8/
MODEL_DIR=./EXP-enwik8_1_test

# Model
N_LAYER=12
D_MODEL=512
D_EMBED=512
N_HEAD=8
D_HEAD=64
D_INNER=2048

# Training
TGT_LEN=512
MEM_LEN=512
TRAIN_STEPS=14483
BSZ=12 #12
NUM_CORE=1

# Testing
TEST_TGT_LEN=80
TEST_MEM_LEN=2100
TEST_CLAMP_LEN=820
TEST_BSZ=10
TEST_NUM_CORE=1

if [[ "$1" == 'train_data' ]]; then
    python data_utils.py \
        --data_dir="${DATA_ROOT}/" \
        --dataset=enwik8 \
        --tgt_len="${TGT_LEN}" \
        --per_host_train_bsz="${BSZ}" \
        --per_host_valid_bsz="${BSZ}" \
        --num_passes=1 \
        --use_tpu=False \
        "${@:2}"
elif [[ "$1" == 'test_data' ]]; then
    python data_utils.py \
        --data_dir="${DATA_ROOT}/" \
        --dataset=enwik8 \
        --tgt_len="${TEST_TGT_LEN}" \
        --per_host_test_bsz="${TEST_BSZ}" \
        --num_passes=1 \
        --use_tpu=False \
        "${@:2}"
elif [[ "$1" == 'train' ]]; then
    echo 'Run training...'
    python train_gpu_test.py \
        --data_dir="${DATA_ROOT}/tfrecords" \
        --record_info_dir="${DATA_ROOT}/tfrecords/" \
        --corpus_info_path="${DATA_ROOT}/corpus-info.json" \
        --model_dir="${MODEL_DIR}" \
        --n_layer="${N_LAYER}" \
        --d_model="${D_MODEL}" \
        --d_embed="${D_EMBED}" \
        --n_head="${N_HEAD}" \
        --d_head="${D_HEAD}" \
        --d_inner="${D_INNER}" \
        --dropout=0.1 \
        --dropatt=0.0 \
        --learning_rate=0.00025 \
        --warmup_steps=0 \
        --train_steps="${TRAIN_STEPS}" \
        --tgt_len="${TGT_LEN}" \
        --mem_len="${MEM_LEN}" \
        --train_batch_size="${BSZ}" \
        --num_core_per_host="${NUM_CORE}" \
        --iterations=200 \
        --save_steps=4000 \
        --do_train=True \
        --do_eval=False \
        "${@:2}"
elif [[ "$1" == 'eval' ]]; then
    echo 'Run evaluation...'
    python train_gpu.py \
        --data_dir="${DATA_ROOT}/tfrecords" \
        --record_info_dir="${DATA_ROOT}/tfrecords/" \
        --corpus_info_path="${DATA_ROOT}/corpus-info.json" \
        --model_dir=EXP-enwik8 \
        --n_layer="${N_LAYER}" \
        --d_model="${D_MODEL}" \
        --d_embed="${D_EMBED}" \
        --n_head="${N_HEAD}" \
        --d_head="${D_HEAD}" \
        --d_inner="${D_INNER}" \
        --dropout=0.0 \
        --dropatt=0.0 \
        --tgt_len="${TEST_TGT_LEN}" \
        --mem_len="${TEST_MEM_LEN}" \
        --clamp_len="${TEST_CLAMP_LEN}" \
        --same_length=True \
        --eval_batch_size="${TEST_BSZ}" \
        --num_core_per_host="${TEST_NUM_CORE}" \
        --do_train=False \
        --do_eval=True \
        --eval_split=test \
        "${@:2}"
else
    echo "unknown argument: $1" >&2
    exit 1
fi
#!/bin/bash
# Launcher for Transformer-XL on enwik8 (4-GPU configuration).
# Usage: run.sh {train_data|test_data|train|eval} [extra flags...]

# Data
#DATA_ROOT=../data/enwik8/
DATA_ROOT=/work/home/hepj/tf1/transformer-xl-master/data/enwik8/

# Model
N_LAYER=12
D_MODEL=512
D_EMBED=512
N_HEAD=8
D_HEAD=64
D_INNER=2048

# Training
TGT_LEN=512
MEM_LEN=512
# 7242: the dataset yields that many batches per pass, hence the step count.
TRAIN_STEPS=14483
# batch size 12 used for testing
BSZ=12
NUM_CORE=4

# Testing
TEST_TGT_LEN=80
TEST_MEM_LEN=2100
TEST_CLAMP_LEN=820
TEST_BSZ=10
TEST_NUM_CORE=1

if [[ "$1" == 'train_data' ]]; then
    python data_utils.py \
        --data_dir="${DATA_ROOT}/" \
        --dataset=enwik8 \
        --tgt_len="${TGT_LEN}" \
        --per_host_train_bsz="${BSZ}" \
        --per_host_valid_bsz="${BSZ}" \
        --num_passes=1 \
        --use_tpu=False \
        "${@:2}"
elif [[ "$1" == 'test_data' ]]; then
    python data_utils.py \
        --data_dir="${DATA_ROOT}/" \
        --dataset=enwik8 \
        --tgt_len="${TEST_TGT_LEN}" \
        --per_host_test_bsz="${TEST_BSZ}" \
        --num_passes=1 \
        --use_tpu=False \
        "${@:2}"
elif [[ "$1" == 'train' ]]; then
    echo 'Run training...'
    # originally: python train_gpu.py
    python train_gpu_test.py \
        --data_dir="${DATA_ROOT}/tfrecords" \
        --record_info_dir="${DATA_ROOT}/tfrecords/" \
        --corpus_info_path="${DATA_ROOT}/corpus-info.json" \
        --model_dir=EXP-enwik8_4_new_bs12 \
        --n_layer="${N_LAYER}" \
        --d_model="${D_MODEL}" \
        --d_embed="${D_EMBED}" \
        --n_head="${N_HEAD}" \
        --d_head="${D_HEAD}" \
        --d_inner="${D_INNER}" \
        --dropout=0.1 \
        --dropatt=0.0 \
        --learning_rate=0.00025 \
        --warmup_steps=0 \
        --train_steps="${TRAIN_STEPS}" \
        --tgt_len="${TGT_LEN}" \
        --mem_len="${MEM_LEN}" \
        --train_batch_size="${BSZ}" \
        --num_core_per_host="${NUM_CORE}" \
        --iterations=200 \
        --save_steps=4000 \
        --do_train=True \
        --do_eval=False \
        "${@:2}"
elif [[ "$1" == 'eval' ]]; then
    echo 'Run evaluation...'
    python train_gpu.py \
        --data_dir="${DATA_ROOT}/tfrecords" \
        --record_info_dir="${DATA_ROOT}/tfrecords/" \
        --corpus_info_path="${DATA_ROOT}/corpus-info.json" \
        --model_dir=EXP-enwik8 \
        --n_layer="${N_LAYER}" \
        --d_model="${D_MODEL}" \
        --d_embed="${D_EMBED}" \
        --n_head="${N_HEAD}" \
        --d_head="${D_HEAD}" \
        --d_inner="${D_INNER}" \
        --dropout=0.0 \
        --dropatt=0.0 \
        --tgt_len="${TEST_TGT_LEN}" \
        --mem_len="${TEST_MEM_LEN}" \
        --clamp_len="${TEST_CLAMP_LEN}" \
        --same_length=True \
        --eval_batch_size="${TEST_BSZ}" \
        --num_core_per_host="${TEST_NUM_CORE}" \
        --do_train=False \
        --do_eval=True \
        --eval_split=test \
        "${@:2}"
else
    echo "unknown argument: $1" >&2
    exit 1
fi
#!/bin/bash
# Launcher for Transformer-XL on text8 (single visible GPU).
#
# NOTE: the environment setup below is intentionally disabled; uncomment the
# lines you need. (The original wrapped them in a Python-style ''' ... '''
# block, which is not a comment in bash — it was executed as one giant
# quoted word and failed with "command not found".)
#export HSA_FORCE_FINE_GRAIN_PCIE=1
#export MIOPEN_FIND_MODE=3
#export MIOPEN_ENABLE_LOGGING_CMD=1
#export ROCBLAS_LAYER=3
#module unload compiler/rocm/2.9
#echo "MIOPEN_FIND_MODE=$MIOPEN_FIND_MODE"
#lrank=$OMPI_COMM_WORLD_LOCAL_RANK
#comm_rank=$OMPI_COMM_WORLD_RANK
#comm_size=$OMPI_COMM_WORLD_SIZE
#NCCL_DEBUG=INFO

# Restrict the run to the first ROCm device.
export HIP_VISIBLE_DEVICES=0

# Data
#DATA_ROOT=../data/enwik8/
DATA_ROOT=/public/home/hepj/SothisAI/transformer-xl-master/data/text8

# Model
N_LAYER=12
D_MODEL=512
D_EMBED=512
N_HEAD=8
D_HEAD=64
D_INNER=2048

# Training
TGT_LEN=512
MEM_LEN=512
TRAIN_STEPS=14483
BSZ=12
NUM_CORE=1

# Testing
TEST_TGT_LEN=80
TEST_MEM_LEN=2100
TEST_CLAMP_LEN=820
TEST_BSZ=10
TEST_NUM_CORE=1
# Dispatch on the first CLI argument; extra arguments after $1 are forwarded
# verbatim to the underlying python script via "${@:2}" (quoted so arguments
# containing spaces survive).
if [[ "$1" == 'train_data' ]]; then
    # NOTE(review): DATA_ROOT above points at a text8 corpus while --dataset
    # is enwik8 — confirm which corpus is actually intended.
    python data_utils.py \
        --data_dir="${DATA_ROOT}/" \
        --dataset=enwik8 \
        --tgt_len="${TGT_LEN}" \
        --per_host_train_bsz="${BSZ}" \
        --per_host_valid_bsz="${BSZ}" \
        --num_passes=1 \
        --use_tpu=False \
        "${@:2}"
elif [[ "$1" == 'test_data' ]]; then
    python data_utils.py \
        --data_dir="${DATA_ROOT}/" \
        --dataset=enwik8 \
        --tgt_len="${TEST_TGT_LEN}" \
        --per_host_test_bsz="${TEST_BSZ}" \
        --num_passes=1 \
        --use_tpu=False \
        "${@:2}"
elif [[ "$1" == 'train' ]]; then
    echo 'Run training...'
    # train_gpu_test.py is a local variant of the upstream train_gpu.py
    # (the original kept the old invocation commented out above it).
    python train_gpu_test.py \
        --data_dir="${DATA_ROOT}/tfrecords" \
        --record_info_dir="${DATA_ROOT}/tfrecords/" \
        --corpus_info_path="${DATA_ROOT}/corpus-info.json" \
        --model_dir=EXP-enwik8_test \
        --n_layer="${N_LAYER}" \
        --d_model="${D_MODEL}" \
        --d_embed="${D_EMBED}" \
        --n_head="${N_HEAD}" \
        --d_head="${D_HEAD}" \
        --d_inner="${D_INNER}" \
        --dropout=0.1 \
        --dropatt=0.0 \
        --learning_rate=0.00025 \
        --warmup_steps=0 \
        --train_steps="${TRAIN_STEPS}" \
        --tgt_len="${TGT_LEN}" \
        --mem_len="${MEM_LEN}" \
        --train_batch_size="${BSZ}" \
        --num_core_per_host="${NUM_CORE}" \
        --iterations=200 \
        --save_steps=4000 \
        --do_train=True \
        --do_eval=False \
        "${@:2}"
elif [[ "$1" == 'eval' ]]; then
    echo 'Run evaluation...'
    # Evaluation disables dropout and uses the long-memory test settings.
    python train_gpu.py \
        --data_dir="${DATA_ROOT}/tfrecords" \
        --record_info_dir="${DATA_ROOT}/tfrecords/" \
        --corpus_info_path="${DATA_ROOT}/corpus-info.json" \
        --model_dir=EXP-enwik8 \
        --n_layer="${N_LAYER}" \
        --d_model="${D_MODEL}" \
        --d_embed="${D_EMBED}" \
        --n_head="${N_HEAD}" \
        --d_head="${D_HEAD}" \
        --d_inner="${D_INNER}" \
        --dropout=0.0 \
        --dropatt=0.0 \
        --tgt_len="${TEST_TGT_LEN}" \
        --mem_len="${TEST_MEM_LEN}" \
        --clamp_len="${TEST_CLAMP_LEN}" \
        --same_length=True \
        --eval_batch_size="${TEST_BSZ}" \
        --num_core_per_host="${TEST_NUM_CORE}" \
        --do_train=False \
        --do_eval=True \
        --eval_split=test \
        "${@:2}"
else
    # Typo fixed ('argment'); diagnostics go to stderr.
    echo "unknown argument: $1" >&2
fi
#!/bin/bash
# TPU configuration for Transformer-XL on enwik8.
# Path
# LOCAL_DIR is the local corpus copy; GSDATA/GSEXP are GCS bucket paths for
# tfrecords and checkpoints — left blank, must be filled in before running.
LOCAL_DIR=../data/enwik8/
GSDATA=
GSEXP=
# TPU setting
NUM_HOST=2
NUM_CORE=16 # TPUv2 -> 8 | TPUv3 -> 16
TEST_NUM_HOST=1
TEST_NUM_CORE=8 # TPUv2 -> 8 | TPUv3 -> 16
# Model
N_LAYER=24
D_MODEL=1024
D_EMBED=1024
N_HEAD=8
D_HEAD=128
D_INNER=3072
# Training
TGT_LEN=768
MEM_LEN=768
TRAIN_BSZ=64
VALID_BSZ=64
# Testing
# Evaluation uses a short target length with a much longer attention memory.
TEST_TGT_LEN=128
TEST_MEM_LEN=3800
TEST_CLAMP_LEN=1000
TEST_BSZ=16
# Dispatch on the first CLI argument; extra arguments after $1 are forwarded
# verbatim via "${@:2}" (quoted to preserve arguments containing spaces).
if [[ "$1" == 'train_data' ]]; then
    python data_utils.py \
        --data_dir="${LOCAL_DIR}/" \
        --dataset=enwik8 \
        --tgt_len="${TGT_LEN}" \
        --per_host_train_bsz="${TRAIN_BSZ}" \
        --per_host_valid_bsz="${VALID_BSZ}" \
        --num_core_per_host="${NUM_CORE}" \
        --num_passes=10 \
        --use_tpu=True \
        "${@:2}"
    # SRC_PATTERN is a glob: it must stay unquoted at the use site so the
    # shell expands it against the generated record files.
    SRC_PATTERN=train.bsz-${TRAIN_BSZ}.tlen-${TGT_LEN}.core-${NUM_CORE}*
    gsutil cp "${LOCAL_DIR}"/tfrecords/${SRC_PATTERN} "${GSDATA}/enwik8-tfrecords/"
    SRC_PATTERN=valid.bsz-${VALID_BSZ}.tlen-${TGT_LEN}.core-${NUM_CORE}*
    gsutil cp "${LOCAL_DIR}"/tfrecords/${SRC_PATTERN} "${GSDATA}/enwik8-tfrecords/"
elif [[ "$1" == 'test_data' ]]; then
    python data_utils.py \
        --data_dir="${LOCAL_DIR}/" \
        --dataset=enwik8 \
        --tgt_len="${TEST_TGT_LEN}" \
        --per_host_test_bsz="${TEST_BSZ}" \
        --num_core_per_host="${TEST_NUM_CORE}" \
        --num_passes=1 \
        --use_tpu=True \
        "${@:2}"
    SRC_PATTERN=test.bsz-${TEST_BSZ}.tlen-${TEST_TGT_LEN}.core-${TEST_NUM_CORE}*
    gsutil cp "${LOCAL_DIR}"/tfrecords/${SRC_PATTERN} "${GSDATA}/enwik8-tfrecords/"
elif [[ "$1" == 'train' ]]; then
    echo 'Run training...'
    python train.py \
        --data_dir="${GSDATA}/enwik8-tfrecords" \
        --record_info_dir="${LOCAL_DIR}/tfrecords/" \
        --corpus_info_path="${LOCAL_DIR}/corpus-info.json" \
        --model_dir="${GSEXP}/enwik8" \
        --n_layer="${N_LAYER}" \
        --d_model="${D_MODEL}" \
        --d_embed="${D_EMBED}" \
        --n_head="${N_HEAD}" \
        --d_head="${D_HEAD}" \
        --d_inner="${D_INNER}" \
        --dropout=0.15 \
        --dropatt=0.15 \
        --learning_rate=0.00025 \
        --warmup_steps=4000 \
        --train_steps=400000 \
        --tgt_len="${TGT_LEN}" \
        --mem_len="${MEM_LEN}" \
        --train_batch_size="${TRAIN_BSZ}" \
        --use_tpu=True \
        --num_host="${NUM_HOST}" \
        --num_core_per_host="${NUM_CORE}" \
        --iterations=1000 \
        --save_steps=10000 \
        --do_train=True \
        --do_eval=False \
        "${@:2}"
elif [[ "$1" == 'eval' ]]; then
    echo 'Run evaluation...'
    python train.py \
        --data_dir="${GSDATA}/enwik8-tfrecords" \
        --record_info_dir="${LOCAL_DIR}/tfrecords/" \
        --corpus_info_path="${LOCAL_DIR}/corpus-info.json" \
        --model_dir="${GSEXP}/enwik8" \
        --n_layer="${N_LAYER}" \
        --d_model="${D_MODEL}" \
        --d_embed="${D_EMBED}" \
        --n_head="${N_HEAD}" \
        --d_head="${D_HEAD}" \
        --d_inner="${D_INNER}" \
        --tgt_len="${TEST_TGT_LEN}" \
        --mem_len="${TEST_MEM_LEN}" \
        --eval_batch_size="${TEST_BSZ}" \
        --num_host="${TEST_NUM_HOST}" \
        --num_core_per_host="${TEST_NUM_CORE}" \
        --use_tpu=True \
        --do_train=False \
        --do_eval_only=True \
        --eval_split=test \
        "${@:2}"
else
    # Typo fixed ('argment'); diagnostics go to stderr.
    echo "unknown argument: $1" >&2
fi
#!/bin/bash
# GPU configuration for Transformer-XL on the One Billion Word benchmark.
# Data
DATA_ROOT=../data/one-billion-words/
# Model
# DIV_VAL is forwarded to train_gpu.py as --div_val.
DIV_VAL=4
N_LAYER=18
D_MODEL=1024
D_EMBED=1024
N_HEAD=8
D_HEAD=128
D_INNER=4096
# Training
TGT_LEN=256
MEM_LEN=256
BSZ=256
NUM_CORE=4
# Testing
TEST_TGT_LEN=32
TEST_MEM_LEN=128
# -1 is passed straight through as --clamp_len at evaluation time.
TEST_CLAMP_LEN=-1
TEST_BSZ=16
TEST_NUM_CORE=1
# Dispatch on the first CLI argument; extra arguments after $1 are forwarded
# verbatim via "${@:2}" (quoted to preserve arguments containing spaces).
if [[ "$1" == 'train_data' ]]; then
    python data_utils.py \
        --data_dir="${DATA_ROOT}/" \
        --dataset=lm1b \
        --tgt_len="${TGT_LEN}" \
        --per_host_train_bsz="${BSZ}" \
        --per_host_valid_bsz="${BSZ}" \
        --num_passes=1 \
        --use_tpu=False \
        "${@:2}"
elif [[ "$1" == 'test_data' ]]; then
    python data_utils.py \
        --data_dir="${DATA_ROOT}/" \
        --dataset=lm1b \
        --tgt_len="${TEST_TGT_LEN}" \
        --per_host_test_bsz="${TEST_BSZ}" \
        --num_passes=1 \
        --use_tpu=False \
        "${@:2}"
elif [[ "$1" == 'train' ]]; then
    echo 'Run training...'
    python train_gpu.py \
        --data_dir="${DATA_ROOT}/tfrecords" \
        --record_info_dir="${DATA_ROOT}/tfrecords/" \
        --corpus_info_path="${DATA_ROOT}/corpus-info.json" \
        --model_dir=EXP-lm1b \
        --div_val="${DIV_VAL}" \
        --untie_r=True \
        --proj_share_all_but_first=False \
        --proj_same_dim=False \
        --n_layer="${N_LAYER}" \
        --d_model="${D_MODEL}" \
        --d_embed="${D_EMBED}" \
        --n_head="${N_HEAD}" \
        --d_head="${D_HEAD}" \
        --d_inner="${D_INNER}" \
        --dropout=0.1 \
        --dropatt=0.0 \
        --learning_rate=0.00025 \
        --warmup_steps=0 \
        --train_steps=400000 \
        --tgt_len="${TGT_LEN}" \
        --mem_len="${MEM_LEN}" \
        --train_batch_size="${BSZ}" \
        --num_core_per_host="${NUM_CORE}" \
        --iterations=200 \
        --save_steps=4000 \
        "${@:2}"
elif [[ "$1" == 'eval' ]]; then
    echo 'Run evaluation...'
    python train_gpu.py \
        --data_dir="${DATA_ROOT}/tfrecords" \
        --record_info_dir="${DATA_ROOT}/tfrecords/" \
        --corpus_info_path="${DATA_ROOT}/corpus-info.json" \
        --model_dir=EXP-lm1b \
        --div_val="${DIV_VAL}" \
        --untie_r=True \
        --proj_share_all_but_first=False \
        --proj_same_dim=False \
        --n_layer="${N_LAYER}" \
        --d_model="${D_MODEL}" \
        --d_embed="${D_EMBED}" \
        --n_head="${N_HEAD}" \
        --d_head="${D_HEAD}" \
        --d_inner="${D_INNER}" \
        --dropout=0.0 \
        --dropatt=0.0 \
        --tgt_len="${TEST_TGT_LEN}" \
        --mem_len="${TEST_MEM_LEN}" \
        --clamp_len="${TEST_CLAMP_LEN}" \
        --same_length=True \
        --eval_batch_size="${TEST_BSZ}" \
        --num_core_per_host="${TEST_NUM_CORE}" \
        --do_train=False \
        --do_eval=True \
        --eval_split=test \
        "${@:2}"
else
    # Typo fixed ('argment'); diagnostics go to stderr.
    echo "unknown argument: $1" >&2
fi
#!/bin/bash
# TPU configuration for Transformer-XL on the One Billion Word benchmark.
# Path
# LOCAL_DIR is the local corpus copy; GSDATA/GSEXP are GCS bucket paths —
# left blank, must be filled in before running.
LOCAL_DIR=../data/one-billion-words/
GSDATA=
GSEXP=
# TPU setting
NUM_HOST=32
NUM_CORE=16 # TPUv2 -> 8 | TPUv3 -> 16
TEST_NUM_HOST=1
TEST_NUM_CORE=8 # TPUv2 -> 8 | TPUv3 -> 16
# Model
DIV_VAL=4
N_LAYER=24
D_MODEL=1280
D_EMBED=1280
N_HEAD=16
D_HEAD=80
D_INNER=8192
# Training
TGT_LEN=32
MEM_LEN=32
TRAIN_BSZ=512
VALID_BSZ=512
# Per-host batch sizes derived by integer division across the hosts.
TRAIN_BSZ_PER_HOST=$((TRAIN_BSZ / NUM_HOST))
VALID_BSZ_PER_HOST=$((VALID_BSZ / NUM_HOST))
# Testing
TEST_TGT_LEN=32
TEST_MEM_LEN=128
TEST_CLAMP_LEN=-1
TEST_BSZ=8
# Dispatch on the first CLI argument; extra arguments after $1 are forwarded
# verbatim via "${@:2}" (quoted to preserve arguments containing spaces).
if [[ "$1" == 'train_data' ]]; then
    python data_utils.py \
        --data_dir="${LOCAL_DIR}/" \
        --dataset=lm1b \
        --tgt_len="${TGT_LEN}" \
        --per_host_train_bsz="${TRAIN_BSZ_PER_HOST}" \
        --per_host_valid_bsz="${VALID_BSZ_PER_HOST}" \
        --num_core_per_host="${NUM_CORE}" \
        --num_passes=10 \
        --use_tpu=True \
        "${@:2}"
    # NOTE(review): records above are written with the PER-HOST batch size,
    # but the copy patterns below use the total TRAIN_BSZ/VALID_BSZ — verify
    # the generated filenames actually match these globs.
    # SRC_PATTERN is a glob: keep it unquoted at the use site.
    SRC_PATTERN=train.bsz-${TRAIN_BSZ}.tlen-${TGT_LEN}.core-${NUM_CORE}*
    gsutil cp "${LOCAL_DIR}"/tfrecords/${SRC_PATTERN} "${GSDATA}/lm1b-tfrecords/"
    SRC_PATTERN=valid.bsz-${VALID_BSZ}.tlen-${TGT_LEN}.core-${NUM_CORE}*
    gsutil cp "${LOCAL_DIR}"/tfrecords/${SRC_PATTERN} "${GSDATA}/lm1b-tfrecords/"
elif [[ "$1" == 'test_data' ]]; then
    python data_utils.py \
        --data_dir="${LOCAL_DIR}/" \
        --dataset=lm1b \
        --tgt_len="${TEST_TGT_LEN}" \
        --per_host_test_bsz="${TEST_BSZ}" \
        --num_core_per_host="${TEST_NUM_CORE}" \
        --num_passes=1 \
        --use_tpu=True \
        "${@:2}"
    SRC_PATTERN=test.bsz-${TEST_BSZ}.tlen-${TEST_TGT_LEN}.core-${TEST_NUM_CORE}*
    gsutil cp "${LOCAL_DIR}"/tfrecords/${SRC_PATTERN} "${GSDATA}/lm1b-tfrecords/"
elif [[ "$1" == 'train' ]]; then
    echo 'Run training...'
    # NOTE(review): this branch passes --num_hosts (plural) while the eval
    # branch and other scripts pass --num_host — confirm which flag train.py
    # actually defines.
    python train.py \
        --data_dir="${GSDATA}/lm1b-tfrecords" \
        --record_info_dir="${LOCAL_DIR}/tfrecords/" \
        --corpus_info_path="${LOCAL_DIR}/corpus-info.json" \
        --model_dir="${GSEXP}/lm1b" \
        --div_val="${DIV_VAL}" \
        --untie_r=True \
        --proj_share_all_but_first=False \
        --proj_same_dim=False \
        --n_layer="${N_LAYER}" \
        --d_model="${D_MODEL}" \
        --d_embed="${D_EMBED}" \
        --n_head="${N_HEAD}" \
        --d_head="${D_HEAD}" \
        --d_inner="${D_INNER}" \
        --dropout=0.05 \
        --dropatt=0.05 \
        --init_std=0.005 \
        --learning_rate=0.0001 \
        --warmup_steps=30000 \
        --train_steps=1200000 \
        --tgt_len="${TGT_LEN}" \
        --mem_len="${MEM_LEN}" \
        --train_batch_size="${TRAIN_BSZ}" \
        --num_hosts="${NUM_HOST}" \
        --num_core_per_host="${NUM_CORE}" \
        --iterations=1000 \
        --save_steps=10000 \
        --use_tpu=True \
        --do_eval=False \
        "${@:2}"
elif [[ "$1" == 'eval' ]]; then
    echo 'Run evaluation...'
    python train.py \
        --data_dir="${GSDATA}/lm1b-tfrecords" \
        --record_info_dir="${LOCAL_DIR}/tfrecords/" \
        --corpus_info_path="${LOCAL_DIR}/corpus-info.json" \
        --model_dir="${GSEXP}/lm1b" \
        --div_val="${DIV_VAL}" \
        --untie_r=True \
        --proj_share_all_but_first=False \
        --proj_same_dim=False \
        --n_layer="${N_LAYER}" \
        --d_model="${D_MODEL}" \
        --d_embed="${D_EMBED}" \
        --n_head="${N_HEAD}" \
        --d_head="${D_HEAD}" \
        --d_inner="${D_INNER}" \
        --tgt_len="${TEST_TGT_LEN}" \
        --mem_len="${TEST_MEM_LEN}" \
        --clamp_len="${TEST_CLAMP_LEN}" \
        --same_length=True \
        --eval_batch_size="${TEST_BSZ}" \
        --num_host="${TEST_NUM_HOST}" \
        --num_core_per_host="${TEST_NUM_CORE}" \
        --use_tpu=True \
        --do_train=False \
        --do_eval_only=True \
        --eval_split=test \
        "${@:2}"
else
    # Typo fixed ('argment'); diagnostics go to stderr.
    echo "unknown argument: $1" >&2
fi
#!/bin/bash
# GPU configuration for Transformer-XL on text8.
# Data
DATA_ROOT=../data/text8/
# Model
N_LAYER=12
D_MODEL=512
D_EMBED=512
N_HEAD=8
D_HEAD=64
D_INNER=2048
# Training
TGT_LEN=512
MEM_LEN=512
BSZ=24
NUM_CORE=4
# Testing
# Evaluation uses a short target length with a much longer attention memory.
TEST_TGT_LEN=80
TEST_MEM_LEN=2100
TEST_CLAMP_LEN=820
TEST_BSZ=10
TEST_NUM_CORE=1
# Dispatch on the first CLI argument; extra arguments after $1 are forwarded
# verbatim via "${@:2}" (quoted to preserve arguments containing spaces).
if [[ "$1" == 'train_data' ]]; then
    python data_utils.py \
        --data_dir="${DATA_ROOT}/" \
        --dataset=text8 \
        --tgt_len="${TGT_LEN}" \
        --per_host_train_bsz="${BSZ}" \
        --per_host_valid_bsz="${BSZ}" \
        --num_passes=1 \
        --use_tpu=False \
        "${@:2}"
elif [[ "$1" == 'test_data' ]]; then
    python data_utils.py \
        --data_dir="${DATA_ROOT}/" \
        --dataset=text8 \
        --tgt_len="${TEST_TGT_LEN}" \
        --per_host_test_bsz="${TEST_BSZ}" \
        --num_passes=1 \
        --use_tpu=False \
        "${@:2}"
elif [[ "$1" == 'train' ]]; then
    echo 'Run training...'
    python train_gpu.py \
        --data_dir="${DATA_ROOT}/tfrecords" \
        --record_info_dir="${DATA_ROOT}/tfrecords/" \
        --corpus_info_path="${DATA_ROOT}/corpus-info.json" \
        --model_dir=EXP-text8 \
        --n_layer="${N_LAYER}" \
        --d_model="${D_MODEL}" \
        --d_embed="${D_EMBED}" \
        --n_head="${N_HEAD}" \
        --d_head="${D_HEAD}" \
        --d_inner="${D_INNER}" \
        --dropout=0.1 \
        --dropatt=0.0 \
        --learning_rate=0.00025 \
        --warmup_steps=0 \
        --train_steps=400000 \
        --tgt_len="${TGT_LEN}" \
        --mem_len="${MEM_LEN}" \
        --train_batch_size="${BSZ}" \
        --num_core_per_host="${NUM_CORE}" \
        --iterations=200 \
        --save_steps=4000 \
        --do_train=True \
        --do_eval=False \
        "${@:2}"
elif [[ "$1" == 'eval' ]]; then
    echo 'Run evaluation...'
    # Evaluation disables dropout and uses the long-memory test settings.
    python train_gpu.py \
        --data_dir="${DATA_ROOT}/tfrecords" \
        --record_info_dir="${DATA_ROOT}/tfrecords/" \
        --corpus_info_path="${DATA_ROOT}/corpus-info.json" \
        --model_dir=EXP-text8 \
        --n_layer="${N_LAYER}" \
        --d_model="${D_MODEL}" \
        --d_embed="${D_EMBED}" \
        --n_head="${N_HEAD}" \
        --d_head="${D_HEAD}" \
        --d_inner="${D_INNER}" \
        --dropout=0.0 \
        --dropatt=0.0 \
        --tgt_len="${TEST_TGT_LEN}" \
        --mem_len="${TEST_MEM_LEN}" \
        --clamp_len="${TEST_CLAMP_LEN}" \
        --same_length=True \
        --eval_batch_size="${TEST_BSZ}" \
        --num_core_per_host="${TEST_NUM_CORE}" \
        --do_train=False \
        --do_eval=True \
        --eval_split=test \
        "${@:2}"
else
    # Typo fixed ('argment'); diagnostics go to stderr.
    echo "unknown argument: $1" >&2
fi
# (removed: stray git-diff artifact line "\ No newline at end of file")
#!/bin/bash
# TPU configuration for Transformer-XL on text8.
# Path
# LOCAL_DIR is the local corpus copy; GSDATA/GSEXP are GCS bucket paths —
# left blank, must be filled in before running.
LOCAL_DIR=../data/text8/
GSDATA=
GSEXP=
# TPU setting
NUM_HOST=2
NUM_CORE=16 # TPUv2 -> 8 | TPUv3 -> 16
TEST_NUM_HOST=1
TEST_NUM_CORE=8 # TPUv2 -> 8 | TPUv3 -> 16
# Model
N_LAYER=24
D_MODEL=1024
D_EMBED=1024
N_HEAD=8
D_HEAD=128
D_INNER=3072
# Training
TGT_LEN=768
MEM_LEN=768
TRAIN_BSZ=64
VALID_BSZ=64
# Testing
TEST_TGT_LEN=128
TEST_MEM_LEN=3800
TEST_CLAMP_LEN=1000
TEST_BSZ=16
# Dispatch on the first CLI argument; extra arguments after $1 are forwarded
# verbatim via "${@:2}" (quoted to preserve arguments containing spaces).
if [[ "$1" == 'train_data' ]]; then
    python data_utils.py \
        --data_dir="${LOCAL_DIR}/" \
        --dataset=text8 \
        --tgt_len="${TGT_LEN}" \
        --per_host_train_bsz="${TRAIN_BSZ}" \
        --per_host_valid_bsz="${VALID_BSZ}" \
        --num_core_per_host="${NUM_CORE}" \
        --num_passes=10 \
        --use_tpu=True \
        "${@:2}"
    # SRC_PATTERN is a glob: keep it unquoted at the use site so it expands.
    SRC_PATTERN=train.bsz-${TRAIN_BSZ}.tlen-${TGT_LEN}.core-${NUM_CORE}*
    gsutil cp "${LOCAL_DIR}"/tfrecords/${SRC_PATTERN} "${GSDATA}/text8-tfrecords/"
    SRC_PATTERN=valid.bsz-${VALID_BSZ}.tlen-${TGT_LEN}.core-${NUM_CORE}*
    gsutil cp "${LOCAL_DIR}"/tfrecords/${SRC_PATTERN} "${GSDATA}/text8-tfrecords/"
elif [[ "$1" == 'test_data' ]]; then
    python data_utils.py \
        --data_dir="${LOCAL_DIR}/" \
        --dataset=text8 \
        --tgt_len="${TEST_TGT_LEN}" \
        --per_host_test_bsz="${TEST_BSZ}" \
        --num_core_per_host="${TEST_NUM_CORE}" \
        --num_passes=1 \
        --use_tpu=True \
        "${@:2}"
    SRC_PATTERN=test.bsz-${TEST_BSZ}.tlen-${TEST_TGT_LEN}.core-${TEST_NUM_CORE}*
    gsutil cp "${LOCAL_DIR}"/tfrecords/${SRC_PATTERN} "${GSDATA}/text8-tfrecords/"
elif [[ "$1" == 'train' ]]; then
    echo 'Run training...'
    python train.py \
        --data_dir="${GSDATA}/text8-tfrecords" \
        --record_info_dir="${LOCAL_DIR}/tfrecords/" \
        --corpus_info_path="${LOCAL_DIR}/corpus-info.json" \
        --model_dir="${GSEXP}/text8" \
        --n_layer="${N_LAYER}" \
        --d_model="${D_MODEL}" \
        --d_embed="${D_EMBED}" \
        --n_head="${N_HEAD}" \
        --d_head="${D_HEAD}" \
        --d_inner="${D_INNER}" \
        --dropout=0.15 \
        --dropatt=0.15 \
        --learning_rate=0.00025 \
        --warmup_steps=4000 \
        --train_steps=400000 \
        --tgt_len="${TGT_LEN}" \
        --mem_len="${MEM_LEN}" \
        --train_batch_size="${TRAIN_BSZ}" \
        --use_tpu=True \
        --num_host="${NUM_HOST}" \
        --num_core_per_host="${NUM_CORE}" \
        --iterations=1000 \
        --save_steps=10000 \
        --do_train=True \
        --do_eval=False \
        "${@:2}"
elif [[ "$1" == 'eval' ]]; then
    echo 'Run evaluation...'
    python train.py \
        --data_dir="${GSDATA}/text8-tfrecords" \
        --record_info_dir="${LOCAL_DIR}/tfrecords/" \
        --corpus_info_path="${LOCAL_DIR}/corpus-info.json" \
        --model_dir="${GSEXP}/text8" \
        --n_layer="${N_LAYER}" \
        --d_model="${D_MODEL}" \
        --d_embed="${D_EMBED}" \
        --n_head="${N_HEAD}" \
        --d_head="${D_HEAD}" \
        --d_inner="${D_INNER}" \
        --tgt_len="${TEST_TGT_LEN}" \
        --mem_len="${TEST_MEM_LEN}" \
        --eval_batch_size="${TEST_BSZ}" \
        --num_host="${TEST_NUM_HOST}" \
        --num_core_per_host="${TEST_NUM_CORE}" \
        --use_tpu=True \
        --do_train=False \
        --do_eval_only=True \
        --eval_split=test \
        "${@:2}"
else
    # Typo fixed ('argment'); diagnostics go to stderr.
    echo "unknown argument: $1" >&2
fi
#!/bin/bash
# GPU configuration for Transformer-XL on WikiText-103.
# Data
DATA_ROOT=../data/wikitext-103/
# Model
# DIV_VAL is forwarded to train_gpu.py as --div_val.
DIV_VAL=1
N_LAYER=16
D_MODEL=410
D_EMBED=410
N_HEAD=10
D_HEAD=41
D_INNER=2100
# Training
TGT_LEN=150
MEM_LEN=150
BSZ=60
NUM_CORE=4
# Testing
TEST_TGT_LEN=64
TEST_MEM_LEN=640
TEST_CLAMP_LEN=400
TEST_BSZ=10
TEST_NUM_CORE=1
# Dispatch on the first CLI argument; extra arguments after $1 are forwarded
# verbatim via "${@:2}" (quoted to preserve arguments containing spaces).
if [[ "$1" == 'train_data' ]]; then
    python data_utils.py \
        --data_dir="${DATA_ROOT}/" \
        --dataset=wt103 \
        --tgt_len="${TGT_LEN}" \
        --per_host_train_bsz="${BSZ}" \
        --per_host_valid_bsz="${BSZ}" \
        --num_passes=1 \
        --use_tpu=False \
        "${@:2}"
elif [[ "$1" == 'test_data' ]]; then
    python data_utils.py \
        --data_dir="${DATA_ROOT}/" \
        --dataset=wt103 \
        --tgt_len="${TEST_TGT_LEN}" \
        --per_host_test_bsz="${TEST_BSZ}" \
        --num_passes=1 \
        --use_tpu=False \
        "${@:2}"
elif [[ "$1" == 'train' ]]; then
    echo 'Run training...'
    python train_gpu.py \
        --data_dir="${DATA_ROOT}/tfrecords" \
        --record_info_dir="${DATA_ROOT}/tfrecords/" \
        --corpus_info_path="${DATA_ROOT}/corpus-info.json" \
        --model_dir=EXP-wt103 \
        --div_val="${DIV_VAL}" \
        --untie_r=True \
        --proj_share_all_but_first=True \
        --n_layer="${N_LAYER}" \
        --d_model="${D_MODEL}" \
        --d_embed="${D_EMBED}" \
        --n_head="${N_HEAD}" \
        --d_head="${D_HEAD}" \
        --d_inner="${D_INNER}" \
        --dropout=0.1 \
        --dropatt=0.0 \
        --learning_rate=0.00025 \
        --warmup_steps=0 \
        --train_steps=400000 \
        --tgt_len="${TGT_LEN}" \
        --mem_len="${MEM_LEN}" \
        --train_batch_size="${BSZ}" \
        --num_core_per_host="${NUM_CORE}" \
        --iterations=200 \
        --save_steps=4000 \
        "${@:2}"
elif [[ "$1" == 'eval' ]]; then
    echo 'Run evaluation...'
    python train_gpu.py \
        --data_dir="${DATA_ROOT}/tfrecords" \
        --record_info_dir="${DATA_ROOT}/tfrecords/" \
        --corpus_info_path="${DATA_ROOT}/corpus-info.json" \
        --model_dir=EXP-wt103 \
        --div_val="${DIV_VAL}" \
        --untie_r=True \
        --proj_share_all_but_first=True \
        --n_layer="${N_LAYER}" \
        --d_model="${D_MODEL}" \
        --d_embed="${D_EMBED}" \
        --n_head="${N_HEAD}" \
        --d_head="${D_HEAD}" \
        --d_inner="${D_INNER}" \
        --dropout=0.0 \
        --dropatt=0.0 \
        --tgt_len="${TEST_TGT_LEN}" \
        --mem_len="${TEST_MEM_LEN}" \
        --clamp_len="${TEST_CLAMP_LEN}" \
        --same_length=True \
        --eval_batch_size="${TEST_BSZ}" \
        --num_core_per_host="${TEST_NUM_CORE}" \
        --do_train=False \
        --do_eval=True \
        --eval_split=test \
        "${@:2}"
else
    # Typo fixed ('argment'); diagnostics go to stderr.
    echo "unknown argument: $1" >&2
fi
# (removed: stray git-diff artifact line "\ No newline at end of file")
#!/bin/bash
# TPU configuration for Transformer-XL on WikiText-103.
# Path
# LOCAL_DIR is the local corpus copy; GSDATA/GSEXP are GCS bucket paths —
# left blank, must be filled in before running.
LOCAL_DIR=../data/wikitext-103/
GSDATA=
GSEXP=
# TPU setting
NUM_HOST=4
NUM_CORE=16 # TPUv2 -> 8 | TPUv3 -> 16
TEST_NUM_HOST=1
TEST_NUM_CORE=8 # TPUv2 -> 8 | TPUv3 -> 16
# Model
DIV_VAL=4
N_LAYER=18
D_MODEL=1024
D_EMBED=1024
N_HEAD=16
D_HEAD=64
D_INNER=4096
# Training
TGT_LEN=384
MEM_LEN=384
TRAIN_BSZ=128
VALID_BSZ=128
# Testing
TEST_TGT_LEN=128
TEST_MEM_LEN=1600
TEST_CLAMP_LEN=1000
TEST_BSZ=8
# Dispatch on the first CLI argument; extra arguments after $1 are forwarded
# verbatim via "${@:2}" (quoted to preserve arguments containing spaces).
if [[ "$1" == 'train_data' ]]; then
    python data_utils.py \
        --data_dir="${LOCAL_DIR}/" \
        --dataset=wt103 \
        --tgt_len="${TGT_LEN}" \
        --per_host_train_bsz="${TRAIN_BSZ}" \
        --per_host_valid_bsz="${VALID_BSZ}" \
        --num_core_per_host="${NUM_CORE}" \
        --num_passes=10 \
        --use_tpu=True \
        "${@:2}"
    # SRC_PATTERN is a glob: keep it unquoted at the use site so it expands.
    SRC_PATTERN=train.bsz-${TRAIN_BSZ}.tlen-${TGT_LEN}.core-${NUM_CORE}*
    gsutil cp "${LOCAL_DIR}"/tfrecords/${SRC_PATTERN} "${GSDATA}/wt103-tfrecords/"
    SRC_PATTERN=valid.bsz-${VALID_BSZ}.tlen-${TGT_LEN}.core-${NUM_CORE}*
    gsutil cp "${LOCAL_DIR}"/tfrecords/${SRC_PATTERN} "${GSDATA}/wt103-tfrecords/"
elif [[ "$1" == 'test_data' ]]; then
    python data_utils.py \
        --data_dir="${LOCAL_DIR}/" \
        --dataset=wt103 \
        --tgt_len="${TEST_TGT_LEN}" \
        --per_host_test_bsz="${TEST_BSZ}" \
        --num_core_per_host="${TEST_NUM_CORE}" \
        --num_passes=1 \
        --use_tpu=True \
        "${@:2}"
    SRC_PATTERN=test.bsz-${TEST_BSZ}.tlen-${TEST_TGT_LEN}.core-${TEST_NUM_CORE}*
    gsutil cp "${LOCAL_DIR}"/tfrecords/${SRC_PATTERN} "${GSDATA}/wt103-tfrecords/"
elif [[ "$1" == 'train' ]]; then
    echo 'Run training...'
    # NOTE(review): this branch passes --num_hosts (plural) while the eval
    # branch passes --num_host — confirm which flag train.py defines.
    python train.py \
        --data_dir="${GSDATA}/wt103-tfrecords" \
        --record_info_dir="${LOCAL_DIR}/tfrecords/" \
        --corpus_info_path="${LOCAL_DIR}/corpus-info.json" \
        --model_dir="${GSEXP}/wt103" \
        --div_val="${DIV_VAL}" \
        --untie_r=True \
        --proj_share_all_but_first=True \
        --proj_same_dim=True \
        --n_layer="${N_LAYER}" \
        --d_model="${D_MODEL}" \
        --d_embed="${D_EMBED}" \
        --n_head="${N_HEAD}" \
        --d_head="${D_HEAD}" \
        --d_inner="${D_INNER}" \
        --dropout=0.2 \
        --dropatt=0.2 \
        --init_std=0.005 \
        --learning_rate=0.00025 \
        --warmup_steps=16000 \
        --train_steps=4000000 \
        --tgt_len="${TGT_LEN}" \
        --mem_len="${MEM_LEN}" \
        --train_batch_size="${TRAIN_BSZ}" \
        --num_hosts="${NUM_HOST}" \
        --num_core_per_host="${NUM_CORE}" \
        --iterations=1000 \
        --save_steps=10000 \
        --use_tpu=True \
        --do_eval=False \
        "${@:2}"
elif [[ "$1" == 'eval' ]]; then
    echo 'Run evaluation...'
    python train.py \
        --data_dir="${GSDATA}/wt103-tfrecords" \
        --record_info_dir="${LOCAL_DIR}/tfrecords/" \
        --corpus_info_path="${LOCAL_DIR}/corpus-info.json" \
        --model_dir="${GSEXP}/wt103" \
        --div_val="${DIV_VAL}" \
        --untie_r=True \
        --proj_share_all_but_first=True \
        --proj_same_dim=True \
        --n_layer="${N_LAYER}" \
        --d_model="${D_MODEL}" \
        --d_embed="${D_EMBED}" \
        --n_head="${N_HEAD}" \
        --d_head="${D_HEAD}" \
        --d_inner="${D_INNER}" \
        --tgt_len="${TEST_TGT_LEN}" \
        --mem_len="${TEST_MEM_LEN}" \
        --clamp_len="${TEST_CLAMP_LEN}" \
        --same_length=True \
        --eval_batch_size="${TEST_BSZ}" \
        --num_host="${TEST_NUM_HOST}" \
        --num_core_per_host="${TEST_NUM_CORE}" \
        --use_tpu=True \
        --do_train=False \
        --do_eval_only=True \
        --eval_split=test \
        "${@:2}"
else
    # Typo fixed ('argment'); diagnostics go to stderr.
    echo "unknown argument: $1" >&2
fi
#!/bin/bash
# Fetch the pretrained Transformer-XL checkpoints and cached corpora into
# ./pretrained_xl/{tf_enwik8,tf_text8,tf_wt103,tf_lm1b}/{data,model}.
URL=http://curtis.ml.cmu.edu/datasets/pretrained_xl
DATA_ROOT=./

# download FILEURL
# Downloads FILEURL into the current directory unless a file with the same
# basename already exists (idempotent re-runs).
# BUGFIX: the original referenced a garbled '$(unknown)' — the intended
# variable is ${filename}, the basename derived from the URL.
function download () {
    local fileurl=${1}
    local filename=${fileurl##*/}
    if [ ! -f "${filename}" ]; then
        echo ">>> Download '${filename}' from '${fileurl}'."
        wget --quiet "${fileurl}"
    else
        echo "*** File '${filename}' exists. Skip."
    fi
}

# Each cd is checked so a failed directory change cannot silently cause
# downloads to land in the wrong place.
cd "${DATA_ROOT}" || exit 1
mkdir -p pretrained_xl && cd pretrained_xl || exit 1

# enwik8
mkdir -p tf_enwik8 && cd tf_enwik8 || exit 1
mkdir -p data && cd data || exit 1
download "${URL}/tf_enwiki8/data/cache.pkl"
download "${URL}/tf_enwiki8/data/corpus-info.json"
cd ..
mkdir -p model && cd model || exit 1
download "${URL}/tf_enwiki8/model/checkpoint"
download "${URL}/tf_enwiki8/model/model.ckpt-0.data-00000-of-00001"
download "${URL}/tf_enwiki8/model/model.ckpt-0.index"
download "${URL}/tf_enwiki8/model/model.ckpt-0.meta"
cd ..
cd ..

# text8
mkdir -p tf_text8 && cd tf_text8 || exit 1
mkdir -p data && cd data || exit 1
download "${URL}/tf_text8/data/cache.pkl"
download "${URL}/tf_text8/data/corpus-info.json"
cd ..
mkdir -p model && cd model || exit 1
download "${URL}/tf_text8/model/checkpoint"
download "${URL}/tf_text8/model/model.ckpt-0.data-00000-of-00001"
download "${URL}/tf_text8/model/model.ckpt-0.index"
download "${URL}/tf_text8/model/model.ckpt-0.meta"
cd ..
cd ..

# wt103
mkdir -p tf_wt103 && cd tf_wt103 || exit 1
mkdir -p data && cd data || exit 1
download "${URL}/tf_wt103/data/cache.pkl"
download "${URL}/tf_wt103/data/corpus-info.json"
cd ..
mkdir -p model && cd model || exit 1
download "${URL}/tf_wt103/model/checkpoint"
download "${URL}/tf_wt103/model/model.ckpt-0.data-00000-of-00001"
download "${URL}/tf_wt103/model/model.ckpt-0.index"
download "${URL}/tf_wt103/model/model.ckpt-0.meta"
cd ..
cd ..

# lm1b
mkdir -p tf_lm1b && cd tf_lm1b || exit 1
mkdir -p data && cd data || exit 1
download "${URL}/tf_lm1b/data/cache.pkl"
download "${URL}/tf_lm1b/data/corpus-info.json"
cd ..
mkdir -p model && cd model || exit 1
download "${URL}/tf_lm1b/model/checkpoint"
download "${URL}/tf_lm1b/model/model.ckpt-1191000.data-00000-of-00001"
download "${URL}/tf_lm1b/model/model.ckpt-1191000.index"
download "${URL}/tf_lm1b/model/model.ckpt-1191000.meta"
cd ..
cd ..
# NOTE(review): trailing GitLab page residue ("Markdown is supported" /
# comment-box text) removed — it was captured during extraction and is not
# part of any script.