chore(transformer-xl): Initial commit

Tibo De Peuter 2025-11-07 12:58:13 +01:00
parent ef4684ef39
commit 10512876f2
Signed by: tdpeuter
GPG key ID: 38297DE43F75FFE2
46 changed files with 10547 additions and 0 deletions
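Each script takes one positional argument selecting a stage (train_data, test_data, train, eval); anything after it is forwarded to the underlying Python command via ${@:2}. A minimal sketch of the usual call order for the single-node GPU variant below, assuming the script is saved under the hypothetical name run_enwik8_gpu.sh:

bash run_enwik8_gpu.sh train_data                   # prepare train/valid TFRecords (the train stage reads them from ${DATA_ROOT}/tfrecords)
bash run_enwik8_gpu.sh train                        # train the 12-layer character-level model on GPU
bash run_enwik8_gpu.sh test_data                    # prepare test TFRecords with the evaluation target/batch sizes
bash run_enwik8_gpu.sh eval                         # report results on the test split
bash run_enwik8_gpu.sh train --train_steps=100000   # extra flags override the defaults

The TPU variants follow the same pattern but additionally copy the generated TFRecords to the ${GSDATA} bucket with gsutil.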


@@ -0,0 +1,102 @@
#!/bin/bash
# Data
DATA_ROOT=../data/enwik8/
# Model
N_LAYER=12
D_MODEL=512
D_EMBED=512
N_HEAD=8
D_HEAD=64
D_INNER=2048
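# Per-head dimension: D_HEAD = D_MODEL / N_HEAD (512 / 8 = 64); D_INNER is the position-wise feed-forward width.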
# Training
TGT_LEN=512
MEM_LEN=512
BSZ=24
NUM_CORE=4
# Testing
TEST_TGT_LEN=80
TEST_MEM_LEN=2100
TEST_CLAMP_LEN=820
TEST_BSZ=10
TEST_NUM_CORE=1
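# Evaluation uses a short target length but a much longer memory (TEST_MEM_LEN), with relative positions clamped at TEST_CLAMP_LEN.
# $1 selects the stage below; any further arguments are forwarded to the Python command via ${@:2}.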
if [[ $1 == 'train_data' ]]; then
python data_utils.py \
--data_dir=${DATA_ROOT}/ \
--dataset=enwik8 \
--tgt_len=${TGT_LEN} \
--per_host_train_bsz=${BSZ} \
--per_host_valid_bsz=${BSZ} \
--num_passes=1 \
--use_tpu=False \
${@:2}
elif [[ $1 == 'test_data' ]]; then
python data_utils.py \
--data_dir=${DATA_ROOT}/ \
--dataset=enwik8 \
--tgt_len=${TEST_TGT_LEN} \
--per_host_test_bsz=${TEST_BSZ} \
--num_passes=1 \
--use_tpu=False \
${@:2}
elif [[ $1 == 'train' ]]; then
echo 'Run training...'
python train_gpu.py \
--data_dir=${DATA_ROOT}/tfrecords \
--record_info_dir=${DATA_ROOT}/tfrecords/ \
--corpus_info_path=${DATA_ROOT}/corpus-info.json \
--model_dir=EXP-enwik8 \
--n_layer=${N_LAYER} \
--d_model=${D_MODEL} \
--d_embed=${D_EMBED} \
--n_head=${N_HEAD} \
--d_head=${D_HEAD} \
--d_inner=${D_INNER} \
--dropout=0.1 \
--dropatt=0.0 \
--learning_rate=0.00025 \
--warmup_steps=0 \
--train_steps=400000 \
--tgt_len=${TGT_LEN} \
--mem_len=${MEM_LEN} \
--train_batch_size=${BSZ} \
--num_core_per_host=${NUM_CORE} \
--iterations=200 \
--save_steps=4000 \
--do_train=True \
--do_eval=False \
${@:2}
elif [[ $1 == 'eval' ]]; then
echo 'Run evaluation...'
python train_gpu.py \
--data_dir=${DATA_ROOT}/tfrecords \
--record_info_dir=${DATA_ROOT}/tfrecords/ \
--corpus_info_path=${DATA_ROOT}/corpus-info.json \
--model_dir=EXP-enwik8 \
--n_layer=${N_LAYER} \
--d_model=${D_MODEL} \
--d_embed=${D_EMBED} \
--n_head=${N_HEAD} \
--d_head=${D_HEAD} \
--d_inner=${D_INNER} \
--dropout=0.0 \
--dropatt=0.0 \
--tgt_len=${TEST_TGT_LEN} \
--mem_len=${TEST_MEM_LEN} \
--clamp_len=${TEST_CLAMP_LEN} \
--same_length=True \
--eval_batch_size=${TEST_BSZ} \
--num_core_per_host=${TEST_NUM_CORE} \
--do_train=False \
--do_eval=True \
--eval_split=test \
${@:2}
else
echo 'unknown argument 1'
fi


@@ -0,0 +1,122 @@
#!/bin/bash
# Path
LOCAL_DIR=../data/enwik8/
GSDATA=
GSEXP=
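# GSDATA and GSEXP are left blank here; set them to Google Cloud Storage locations (e.g. gs://<your-bucket>/data
# and gs://<your-bucket>/exp) before running: the TFRecords are copied to GSDATA with gsutil and model_dir lives under GSEXP.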
# TPU setting
NUM_HOST=2
NUM_CORE=16 # TPUv2 -> 8 | TPUv3 -> 16
TEST_NUM_HOST=1
TEST_NUM_CORE=8 # TPUv2 -> 8 | TPUv3 -> 16
# Model
N_LAYER=24
D_MODEL=1024
D_EMBED=1024
N_HEAD=8
D_HEAD=128
D_INNER=3072
# Training
TGT_LEN=768
MEM_LEN=768
TRAIN_BSZ=64
VALID_BSZ=64
# Testing
TEST_TGT_LEN=128
TEST_MEM_LEN=3800
TEST_CLAMP_LEN=1000
TEST_BSZ=16
if [[ $1 == 'train_data' ]]; then
python data_utils.py \
--data_dir=${LOCAL_DIR}/ \
--dataset=enwik8 \
--tgt_len=${TGT_LEN} \
--per_host_train_bsz=${TRAIN_BSZ} \
--per_host_valid_bsz=${VALID_BSZ} \
--num_core_per_host=${NUM_CORE} \
--num_passes=10 \
--use_tpu=True \
${@:2}
SRC_PATTERN=train.bsz-${TRAIN_BSZ}.tlen-${TGT_LEN}.core-${NUM_CORE}*
gsutil cp ${LOCAL_DIR}/tfrecords/${SRC_PATTERN} ${GSDATA}/enwik8-tfrecords/
SRC_PATTERN=valid.bsz-${VALID_BSZ}.tlen-${TGT_LEN}.core-${NUM_CORE}*
gsutil cp ${LOCAL_DIR}/tfrecords/${SRC_PATTERN} ${GSDATA}/enwik8-tfrecords/
elif [[ $1 == 'test_data' ]]; then
python data_utils.py \
--data_dir=${LOCAL_DIR}/ \
--dataset=enwik8 \
--tgt_len=${TEST_TGT_LEN} \
--per_host_test_bsz=${TEST_BSZ} \
--num_core_per_host=${TEST_NUM_CORE} \
--num_passes=1 \
--use_tpu=True \
${@:2}
SRC_PATTERN=test.bsz-${TEST_BSZ}.tlen-${TEST_TGT_LEN}.core-${TEST_NUM_CORE}*
gsutil cp ${LOCAL_DIR}/tfrecords/${SRC_PATTERN} ${GSDATA}/enwik8-tfrecords/
elif [[ $1 == 'train' ]]; then
echo 'Run training...'
python train.py \
--data_dir=${GSDATA}/enwik8-tfrecords \
--record_info_dir=${LOCAL_DIR}/tfrecords/ \
--corpus_info_path=${LOCAL_DIR}/corpus-info.json \
--model_dir=${GSEXP}/enwik8 \
--n_layer=${N_LAYER} \
--d_model=${D_MODEL} \
--d_embed=${D_EMBED} \
--n_head=${N_HEAD} \
--d_head=${D_HEAD} \
--d_inner=${D_INNER} \
--dropout=0.15 \
--dropatt=0.15 \
--learning_rate=0.00025 \
--warmup_steps=4000 \
--train_steps=400000 \
--tgt_len=${TGT_LEN} \
--mem_len=${MEM_LEN} \
--train_batch_size=${TRAIN_BSZ} \
--use_tpu=True \
--num_host=${NUM_HOST} \
--num_core_per_host=${NUM_CORE} \
--iterations=1000 \
--save_steps=10000 \
--do_train=True \
--do_eval=False \
${@:2}
elif [[ $1 == 'eval' ]]; then
echo 'Run evaluation...'
python train.py \
--data_dir=${GSDATA}/enwik8-tfrecords \
--record_info_dir=${LOCAL_DIR}/tfrecords/ \
--corpus_info_path=${LOCAL_DIR}/corpus-info.json \
--model_dir=${GSEXP}/enwik8 \
--n_layer=${N_LAYER} \
--d_model=${D_MODEL} \
--d_embed=${D_EMBED} \
--n_head=${N_HEAD} \
--d_head=${D_HEAD} \
--d_inner=${D_INNER} \
--tgt_len=${TEST_TGT_LEN} \
--mem_len=${TEST_MEM_LEN} \
--eval_batch_size=${TEST_BSZ} \
--num_host=${TEST_NUM_HOST} \
--num_core_per_host=${TEST_NUM_CORE} \
--use_tpu=True \
--do_train=False \
--do_eval_only=True \
--eval_split=test \
${@:2}
else
echo 'unknown argument 1'
fi


@@ -0,0 +1,110 @@
#!/bin/bash
# Data
DATA_ROOT=../data/one-billion-words/
# Model
DIV_VAL=4
N_LAYER=18
D_MODEL=1024
D_EMBED=1024
N_HEAD=8
D_HEAD=128
D_INNER=4096
# Training
TGT_LEN=256
MEM_LEN=256
BSZ=256
NUM_CORE=4
# Testing
TEST_TGT_LEN=32
TEST_MEM_LEN=128
TEST_CLAMP_LEN=-1
TEST_BSZ=16
TEST_NUM_CORE=1
if [[ $1 == 'train_data' ]]; then
python data_utils.py \
--data_dir=${DATA_ROOT}/ \
--dataset=lm1b \
--tgt_len=${TGT_LEN} \
--per_host_train_bsz=${BSZ} \
--per_host_valid_bsz=${BSZ} \
--num_passes=1 \
--use_tpu=False \
${@:2}
elif [[ $1 == 'test_data' ]]; then
python data_utils.py \
--data_dir=${DATA_ROOT}/ \
--dataset=lm1b \
--tgt_len=${TEST_TGT_LEN} \
--per_host_test_bsz=${TEST_BSZ} \
--num_passes=1 \
--use_tpu=False \
${@:2}
elif [[ $1 == 'train' ]]; then
echo 'Run training...'
python train_gpu.py \
--data_dir=${DATA_ROOT}/tfrecords \
--record_info_dir=${DATA_ROOT}/tfrecords/ \
--corpus_info_path=${DATA_ROOT}/corpus-info.json \
--model_dir=EXP-lm1b \
--div_val=${DIV_VAL} \
--untie_r=True \
--proj_share_all_but_first=False \
--proj_same_dim=False \
--n_layer=${N_LAYER} \
--d_model=${D_MODEL} \
--d_embed=${D_EMBED} \
--n_head=${N_HEAD} \
--d_head=${D_HEAD} \
--d_inner=${D_INNER} \
--dropout=0.1 \
--dropatt=0.0 \
--learning_rate=0.00025 \
--warmup_steps=0 \
--train_steps=400000 \
--tgt_len=${TGT_LEN} \
--mem_len=${MEM_LEN} \
--train_batch_size=${BSZ} \
--num_core_per_host=${NUM_CORE} \
--iterations=200 \
--save_steps=4000 \
${@:2}
elif [[ $1 == 'eval' ]]; then
echo 'Run evaluation...'
python train_gpu.py \
--data_dir=${DATA_ROOT}/tfrecords \
--record_info_dir=${DATA_ROOT}/tfrecords/ \
--corpus_info_path=${DATA_ROOT}/corpus-info.json \
--model_dir=EXP-lm1b \
--div_val=${DIV_VAL} \
--untie_r=True \
--proj_share_all_but_first=False \
--proj_same_dim=False \
--n_layer=${N_LAYER} \
--d_model=${D_MODEL} \
--d_embed=${D_EMBED} \
--n_head=${N_HEAD} \
--d_head=${D_HEAD} \
--d_inner=${D_INNER} \
--dropout=0.0 \
--dropatt=0.0 \
--tgt_len=${TEST_TGT_LEN} \
--mem_len=${TEST_MEM_LEN} \
--clamp_len=${TEST_CLAMP_LEN} \
--same_length=True \
--eval_batch_size=${TEST_BSZ} \
--num_core_per_host=${TEST_NUM_CORE} \
--do_train=False \
--do_eval=True \
--eval_split=test \
${@:2}
else
echo 'unknown argument 1'
fi


@@ -0,0 +1,136 @@
#!/bin/bash
# Path
LOCAL_DIR=../data/one-billion-words/
GSDATA=
GSEXP=
# TPU setting
NUM_HOST=32
NUM_CORE=16 # TPUv2 -> 8 | TPUv3 -> 16
TEST_NUM_HOST=1
TEST_NUM_CORE=8 # TPUv2 -> 8 | TPUv3 -> 16
# Model
DIV_VAL=4
N_LAYER=24
D_MODEL=1280
D_EMBED=1280
N_HEAD=16
D_HEAD=80
D_INNER=8192
# Training
TGT_LEN=32
MEM_LEN=32
TRAIN_BSZ=512
VALID_BSZ=512
TRAIN_BSZ_PER_HOST=$((TRAIN_BSZ / NUM_HOST))
VALID_BSZ_PER_HOST=$((VALID_BSZ / NUM_HOST))
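# The global batch is split evenly across hosts: 512 / 32 hosts = 16 sequences per host.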
# Testing
TEST_TGT_LEN=32
TEST_MEM_LEN=128
TEST_CLAMP_LEN=-1
TEST_BSZ=8
if [[ $1 == 'train_data' ]]; then
python data_utils.py \
--data_dir=${LOCAL_DIR}/ \
--dataset=lm1b \
--tgt_len=${TGT_LEN} \
--per_host_train_bsz=${TRAIN_BSZ_PER_HOST} \
--per_host_valid_bsz=${VALID_BSZ_PER_HOST} \
--num_core_per_host=${NUM_CORE} \
--num_passes=10 \
--use_tpu=True \
${@:2}
SRC_PATTERN=train.bsz-${TRAIN_BSZ}.tlen-${TGT_LEN}.core-${NUM_CORE}*
gsutil cp ${LOCAL_DIR}/tfrecords/${SRC_PATTERN} ${GSDATA}/lm1b-tfrecords/
SRC_PATTERN=valid.bsz-${VALID_BSZ}.tlen-${TGT_LEN}.core-${NUM_CORE}*
gsutil cp ${LOCAL_DIR}/tfrecords/${SRC_PATTERN} ${GSDATA}/lm1b-tfrecords/
elif [[ $1 == 'test_data' ]]; then
python data_utils.py \
--data_dir=${LOCAL_DIR}/ \
--dataset=lm1b \
--tgt_len=${TEST_TGT_LEN} \
--per_host_test_bsz=${TEST_BSZ} \
--num_core_per_host=${TEST_NUM_CORE} \
--num_passes=1 \
--use_tpu=True \
${@:2}
SRC_PATTERN=test.bsz-${TEST_BSZ}.tlen-${TEST_TGT_LEN}.core-${TEST_NUM_CORE}*
gsutil cp ${LOCAL_DIR}/tfrecords/${SRC_PATTERN} ${GSDATA}/lm1b-tfrecords/
elif [[ $1 == 'train' ]]; then
echo 'Run training...'
python train.py \
--data_dir=${GSDATA}/lm1b-tfrecords \
--record_info_dir=${LOCAL_DIR}/tfrecords/ \
--corpus_info_path=${LOCAL_DIR}/corpus-info.json \
--model_dir=${GSEXP}/lm1b \
--div_val=${DIV_VAL} \
--untie_r=True \
--proj_share_all_but_first=False \
--proj_same_dim=False \
--n_layer=${N_LAYER} \
--d_model=${D_MODEL} \
--d_embed=${D_EMBED} \
--n_head=${N_HEAD} \
--d_head=${D_HEAD} \
--d_inner=${D_INNER} \
--dropout=0.05 \
--dropatt=0.05 \
--init_std=0.005 \
--learning_rate=0.0001 \
--warmup_steps=30000 \
--train_steps=1200000 \
--tgt_len=${TGT_LEN} \
--mem_len=${MEM_LEN} \
--train_batch_size=${TRAIN_BSZ} \
--num_hosts=${NUM_HOST} \
--num_core_per_host=${NUM_CORE} \
--iterations=1000 \
--save_steps=10000 \
--use_tpu=True \
--do_eval=False \
${@:2}
elif [[ $1 == 'eval' ]]; then
echo 'Run evaluation...'
python train.py \
--data_dir=${GSDATA}/lm1b-tfrecords \
--record_info_dir=${LOCAL_DIR}/tfrecords/ \
--corpus_info_path=${LOCAL_DIR}/corpus-info.json \
--model_dir=${GSEXP}/lm1b \
--div_val=${DIV_VAL} \
--untie_r=True \
--proj_share_all_but_first=False \
--proj_same_dim=False \
--n_layer=${N_LAYER} \
--d_model=${D_MODEL} \
--d_embed=${D_EMBED} \
--n_head=${N_HEAD} \
--d_head=${D_HEAD} \
--d_inner=${D_INNER} \
--tgt_len=${TEST_TGT_LEN} \
--mem_len=${TEST_MEM_LEN} \
--clamp_len=${TEST_CLAMP_LEN} \
--same_length=True \
--eval_batch_size=${TEST_BSZ} \
--num_host=${TEST_NUM_HOST} \
--num_core_per_host=${TEST_NUM_CORE} \
--use_tpu=True \
--do_train=False \
--do_eval_only=True \
--eval_split=test \
${@:2}
else
echo 'unknown argument 1'
fi


@@ -0,0 +1,102 @@
#!/bin/bash
# Data
DATA_ROOT=../data/text8/
# Model
N_LAYER=12
D_MODEL=512
D_EMBED=512
N_HEAD=8
D_HEAD=64
D_INNER=2048
# Training
TGT_LEN=512
MEM_LEN=512
BSZ=24
NUM_CORE=4
# Testing
TEST_TGT_LEN=80
TEST_MEM_LEN=2100
TEST_CLAMP_LEN=820
TEST_BSZ=10
TEST_NUM_CORE=1
if [[ $1 == 'train_data' ]]; then
python data_utils.py \
--data_dir=${DATA_ROOT}/ \
--dataset=text8 \
--tgt_len=${TGT_LEN} \
--per_host_train_bsz=${BSZ} \
--per_host_valid_bsz=${BSZ} \
--num_passes=1 \
--use_tpu=False \
${@:2}
elif [[ $1 == 'test_data' ]]; then
python data_utils.py \
--data_dir=${DATA_ROOT}/ \
--dataset=text8 \
--tgt_len=${TEST_TGT_LEN} \
--per_host_test_bsz=${TEST_BSZ} \
--num_passes=1 \
--use_tpu=False \
${@:2}
elif [[ $1 == 'train' ]]; then
echo 'Run training...'
python train_gpu.py \
--data_dir=${DATA_ROOT}/tfrecords \
--record_info_dir=${DATA_ROOT}/tfrecords/ \
--corpus_info_path=${DATA_ROOT}/corpus-info.json \
--model_dir=EXP-text8 \
--n_layer=${N_LAYER} \
--d_model=${D_MODEL} \
--d_embed=${D_EMBED} \
--n_head=${N_HEAD} \
--d_head=${D_HEAD} \
--d_inner=${D_INNER} \
--dropout=0.1 \
--dropatt=0.0 \
--learning_rate=0.00025 \
--warmup_steps=0 \
--train_steps=400000 \
--tgt_len=${TGT_LEN} \
--mem_len=${MEM_LEN} \
--train_batch_size=${BSZ} \
--num_core_per_host=${NUM_CORE} \
--iterations=200 \
--save_steps=4000 \
--do_train=True \
--do_eval=False \
${@:2}
elif [[ $1 == 'eval' ]]; then
echo 'Run evaluation...'
python train_gpu.py \
--data_dir=${DATA_ROOT}/tfrecords \
--record_info_dir=${DATA_ROOT}/tfrecords/ \
--corpus_info_path=${DATA_ROOT}/corpus-info.json \
--model_dir=EXP-text8 \
--n_layer=${N_LAYER} \
--d_model=${D_MODEL} \
--d_embed=${D_EMBED} \
--n_head=${N_HEAD} \
--d_head=${D_HEAD} \
--d_inner=${D_INNER} \
--dropout=0.0 \
--dropatt=0.0 \
--tgt_len=${TEST_TGT_LEN} \
--mem_len=${TEST_MEM_LEN} \
--clamp_len=${TEST_CLAMP_LEN} \
--same_length=True \
--eval_batch_size=${TEST_BSZ} \
--num_core_per_host=${TEST_NUM_CORE} \
--do_train=False \
--do_eval=True \
--eval_split=test \
${@:2}
else
echo 'unknown argument 1'
fi


@@ -0,0 +1,122 @@
#!/bin/bash
# Path
LOCAL_DIR=../data/text8/
GSDATA=
GSEXP=
# TPU setting
NUM_HOST=2
NUM_CORE=16 # TPUv2 -> 8 | TPUv3 -> 16
TEST_NUM_HOST=1
TEST_NUM_CORE=8 # TPUv2 -> 8 | TPUv3 -> 16
# Model
N_LAYER=24
D_MODEL=1024
D_EMBED=1024
N_HEAD=8
D_HEAD=128
D_INNER=3072
# Training
TGT_LEN=768
MEM_LEN=768
TRAIN_BSZ=64
VALID_BSZ=64
# Testing
TEST_TGT_LEN=128
TEST_MEM_LEN=3800
TEST_CLAMP_LEN=1000
TEST_BSZ=16
if [[ $1 == 'train_data' ]]; then
python data_utils.py \
--data_dir=${LOCAL_DIR}/ \
--dataset=text8 \
--tgt_len=${TGT_LEN} \
--per_host_train_bsz=${TRAIN_BSZ} \
--per_host_valid_bsz=${VALID_BSZ} \
--num_core_per_host=${NUM_CORE} \
--num_passes=10 \
--use_tpu=True \
${@:2}
SRC_PATTERN=train.bsz-${TRAIN_BSZ}.tlen-${TGT_LEN}.core-${NUM_CORE}*
gsutil cp ${LOCAL_DIR}/tfrecords/${SRC_PATTERN} ${GSDATA}/text8-tfrecords/
SRC_PATTERN=valid.bsz-${VALID_BSZ}.tlen-${TGT_LEN}.core-${NUM_CORE}*
gsutil cp ${LOCAL_DIR}/tfrecords/${SRC_PATTERN} ${GSDATA}/text8-tfrecords/
elif [[ $1 == 'test_data' ]]; then
python data_utils.py \
--data_dir=${LOCAL_DIR}/ \
--dataset=text8 \
--tgt_len=${TEST_TGT_LEN} \
--per_host_test_bsz=${TEST_BSZ} \
--num_core_per_host=${TEST_NUM_CORE} \
--num_passes=1 \
--use_tpu=True \
${@:2}
SRC_PATTERN=test.bsz-${TEST_BSZ}.tlen-${TEST_TGT_LEN}.core-${TEST_NUM_CORE}*
gsutil cp ${LOCAL_DIR}/tfrecords/${SRC_PATTERN} ${GSDATA}/text8-tfrecords/
elif [[ $1 == 'train' ]]; then
echo 'Run training...'
python train.py \
--data_dir=${GSDATA}/text8-tfrecords \
--record_info_dir=${LOCAL_DIR}/tfrecords/ \
--corpus_info_path=${LOCAL_DIR}/corpus-info.json \
--model_dir=${GSEXP}/text8 \
--n_layer=${N_LAYER} \
--d_model=${D_MODEL} \
--d_embed=${D_EMBED} \
--n_head=${N_HEAD} \
--d_head=${D_HEAD} \
--d_inner=${D_INNER} \
--dropout=0.15 \
--dropatt=0.15 \
--learning_rate=0.00025 \
--warmup_steps=4000 \
--train_steps=400000 \
--tgt_len=${TGT_LEN} \
--mem_len=${MEM_LEN} \
--train_batch_size=${TRAIN_BSZ} \
--use_tpu=True \
--num_host=${NUM_HOST} \
--num_core_per_host=${NUM_CORE} \
--iterations=1000 \
--save_steps=10000 \
--do_train=True \
--do_eval=False \
${@:2}
elif [[ $1 == 'eval' ]]; then
echo 'Run evaluation...'
python train.py \
--data_dir=${GSDATA}/text8-tfrecords \
--record_info_dir=${LOCAL_DIR}/tfrecords/ \
--corpus_info_path=${LOCAL_DIR}/corpus-info.json \
--model_dir=${GSEXP}/text8 \
--n_layer=${N_LAYER} \
--d_model=${D_MODEL} \
--d_embed=${D_EMBED} \
--n_head=${N_HEAD} \
--d_head=${D_HEAD} \
--d_inner=${D_INNER} \
--tgt_len=${TEST_TGT_LEN} \
--mem_len=${TEST_MEM_LEN} \
--eval_batch_size=${TEST_BSZ} \
--num_host=${TEST_NUM_HOST} \
--num_core_per_host=${TEST_NUM_CORE} \
--use_tpu=True \
--do_train=False \
--do_eval_only=True \
--eval_split=test \
${@:2}
else
echo 'unknown argument 1'
fi


@@ -0,0 +1,108 @@
#!/bin/bash
# Data
DATA_ROOT=../data/wikitext-103/
# Model
DIV_VAL=1
N_LAYER=16
D_MODEL=410
D_EMBED=410
N_HEAD=10
D_HEAD=41
D_INNER=2100
# Training
TGT_LEN=150
MEM_LEN=150
BSZ=60
NUM_CORE=4
# Testing
TEST_TGT_LEN=64
TEST_MEM_LEN=640
TEST_CLAMP_LEN=400
TEST_BSZ=10
TEST_NUM_CORE=1
if [[ $1 == 'train_data' ]]; then
python data_utils.py \
--data_dir=${DATA_ROOT}/ \
--dataset=wt103 \
--tgt_len=${TGT_LEN} \
--per_host_train_bsz=${BSZ} \
--per_host_valid_bsz=${BSZ} \
--num_passes=1 \
--use_tpu=False \
${@:2}
elif [[ $1 == 'test_data' ]]; then
python data_utils.py \
--data_dir=${DATA_ROOT}/ \
--dataset=wt103 \
--tgt_len=${TEST_TGT_LEN} \
--per_host_test_bsz=${TEST_BSZ} \
--num_passes=1 \
--use_tpu=False \
${@:2}
elif [[ $1 == 'train' ]]; then
echo 'Run training...'
python train_gpu.py \
--data_dir=${DATA_ROOT}/tfrecords \
--record_info_dir=${DATA_ROOT}/tfrecords/ \
--corpus_info_path=${DATA_ROOT}/corpus-info.json \
--model_dir=EXP-wt103 \
--div_val=${DIV_VAL} \
--untie_r=True \
--proj_share_all_but_first=True \
--n_layer=${N_LAYER} \
--d_model=${D_MODEL} \
--d_embed=${D_EMBED} \
--n_head=${N_HEAD} \
--d_head=${D_HEAD} \
--d_inner=${D_INNER} \
--dropout=0.1 \
--dropatt=0.0 \
--learning_rate=0.00025 \
--warmup_steps=0 \
--train_steps=400000 \
--tgt_len=${TGT_LEN} \
--mem_len=${MEM_LEN} \
--train_batch_size=${BSZ} \
--num_core_per_host=${NUM_CORE} \
--iterations=200 \
--save_steps=4000 \
${@:2}
elif [[ $1 == 'eval' ]]; then
echo 'Run evaluation...'
python train_gpu.py \
--data_dir=${DATA_ROOT}/tfrecords \
--record_info_dir=${DATA_ROOT}/tfrecords/ \
--corpus_info_path=${DATA_ROOT}/corpus-info.json \
--model_dir=EXP-wt103 \
--div_val=${DIV_VAL} \
--untie_r=True \
--proj_share_all_but_first=True \
--n_layer=${N_LAYER} \
--d_model=${D_MODEL} \
--d_embed=${D_EMBED} \
--n_head=${N_HEAD} \
--d_head=${D_HEAD} \
--d_inner=${D_INNER} \
--dropout=0.0 \
--dropatt=0.0 \
--tgt_len=${TEST_TGT_LEN} \
--mem_len=${TEST_MEM_LEN} \
--clamp_len=${TEST_CLAMP_LEN} \
--same_length=True \
--eval_batch_size=${TEST_BSZ} \
--num_core_per_host=${TEST_NUM_CORE} \
--do_train=False \
--do_eval=True \
--eval_split=test \
${@:2}
else
echo 'unknown argument 1'
fi


@@ -0,0 +1,134 @@
#!/bin/bash
# Path
LOCAL_DIR=../data/wikitext-103/
GSDATA=
GSEXP=
# TPU setting
NUM_HOST=4
NUM_CORE=16 # TPUv2 -> 8 | TPUv3 -> 16
TEST_NUM_HOST=1
TEST_NUM_CORE=8 # TPUv2 -> 8 | TPUv3 -> 16
# Model
DIV_VAL=4
N_LAYER=18
D_MODEL=1024
D_EMBED=1024
N_HEAD=16
D_HEAD=64
D_INNER=4096
# Training
TGT_LEN=384
MEM_LEN=384
TRAIN_BSZ=128
VALID_BSZ=128
# Testing
TEST_TGT_LEN=128
TEST_MEM_LEN=1600
TEST_CLAMP_LEN=1000
TEST_BSZ=8
if [[ $1 == 'train_data' ]]; then
python data_utils.py \
--data_dir=${LOCAL_DIR}/ \
--dataset=wt103 \
--tgt_len=${TGT_LEN} \
--per_host_train_bsz=${TRAIN_BSZ} \
--per_host_valid_bsz=${VALID_BSZ} \
--num_core_per_host=${NUM_CORE} \
--num_passes=10 \
--use_tpu=True \
${@:2}
SRC_PATTERN=train.bsz-${TRAIN_BSZ}.tlen-${TGT_LEN}.core-${NUM_CORE}*
gsutil cp ${LOCAL_DIR}/tfrecords/${SRC_PATTERN} ${GSDATA}/wt103-tfrecords/
SRC_PATTERN=valid.bsz-${VALID_BSZ}.tlen-${TGT_LEN}.core-${NUM_CORE}*
gsutil cp ${LOCAL_DIR}/tfrecords/${SRC_PATTERN} ${GSDATA}/wt103-tfrecords/
elif [[ $1 == 'test_data' ]]; then
python data_utils.py \
--data_dir=${LOCAL_DIR}/ \
--dataset=wt103 \
--tgt_len=${TEST_TGT_LEN} \
--per_host_test_bsz=${TEST_BSZ} \
--num_core_per_host=${TEST_NUM_CORE} \
--num_passes=1 \
--use_tpu=True \
${@:2}
SRC_PATTERN=test.bsz-${TEST_BSZ}.tlen-${TEST_TGT_LEN}.core-${TEST_NUM_CORE}*
gsutil cp ${LOCAL_DIR}/tfrecords/${SRC_PATTERN} ${GSDATA}/wt103-tfrecords/
elif [[ $1 == 'train' ]]; then
echo 'Run training...'
python train.py \
--data_dir=${GSDATA}/wt103-tfrecords \
--record_info_dir=${LOCAL_DIR}/tfrecords/ \
--corpus_info_path=${LOCAL_DIR}/corpus-info.json \
--model_dir=${GSEXP}/wt103 \
--div_val=${DIV_VAL} \
--untie_r=True \
--proj_share_all_but_first=True \
--proj_same_dim=True \
--n_layer=${N_LAYER} \
--d_model=${D_MODEL} \
--d_embed=${D_EMBED} \
--n_head=${N_HEAD} \
--d_head=${D_HEAD} \
--d_inner=${D_INNER} \
--dropout=0.2 \
--dropatt=0.2 \
--init_std=0.005 \
--learning_rate=0.00025 \
--warmup_steps=16000 \
--train_steps=4000000 \
--tgt_len=${TGT_LEN} \
--mem_len=${MEM_LEN} \
--train_batch_size=${TRAIN_BSZ} \
--num_hosts=${NUM_HOST} \
--num_core_per_host=${NUM_CORE} \
--iterations=1000 \
--save_steps=10000 \
--use_tpu=True \
--do_eval=False \
${@:2}
elif [[ $1 == 'eval' ]]; then
echo 'Run evaluation...'
python train.py \
--data_dir=${GSDATA}/wt103-tfrecords \
--record_info_dir=${LOCAL_DIR}/tfrecords/ \
--corpus_info_path=${LOCAL_DIR}/corpus-info.json \
--model_dir=${GSEXP}/wt103 \
--div_val=${DIV_VAL} \
--untie_r=True \
--proj_share_all_but_first=True \
--proj_same_dim=True \
--n_layer=${N_LAYER} \
--d_model=${D_MODEL} \
--d_embed=${D_EMBED} \
--n_head=${N_HEAD} \
--d_head=${D_HEAD} \
--d_inner=${D_INNER} \
--tgt_len=${TEST_TGT_LEN} \
--mem_len=${TEST_MEM_LEN} \
--clamp_len=${TEST_CLAMP_LEN} \
--same_length=True \
--eval_batch_size=${TEST_BSZ} \
--num_host=${TEST_NUM_HOST} \
--num_core_per_host=${TEST_NUM_CORE} \
--use_tpu=True \
--do_train=False \
--do_eval_only=True \
--eval_split=test \
${@:2}
else
echo 'unknown argument 1'
fi