ModelLink2/examples/legacy/llama2/pretrain_llama2_70b_ptd.sh
#!/bin/bash
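# Megatron requires CUDA_DEVICE_MAX_CONNECTIONS=1 when --sequence-parallel is
# enabled: a single hardware queue keeps communication kernels scheduled ahead
# of compute so the two can overlap.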
export CUDA_DEVICE_MAX_CONNECTIONS=1
GPUS_PER_NODE=8
MASTER_ADDR=${MASTER_ADDR:-localhost}  # host of the rank-0 node; override per launch
MASTER_PORT=${MASTER_PORT:-6000}
NNODES=4
NODE_RANK=${NODE_RANK:-0}              # this node's index, 0..NNODES-1; override per node
WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))
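# 4 nodes x 8 devices per node = 32 ranks. WORLD_SIZE is informational only;
# torchrun derives the world size from --nproc_per_node and --nnodes.

# Fill in the four paths below before launching.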
CKPT_SAVE_DIR="your model save ckpt path"
DATA_PATH="your data path"
TOKENIZER_MODEL="your tokenizer path"
CKPT_LOAD_DIR="your model ckpt path"
TP=8
PP=4
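# Derived layout: TP * PP = 8 * 4 = 32 = WORLD_SIZE, so the data-parallel size
# is 1, and a global batch of 1024 with micro-batch 1 is accumulated over
# 1024 / (1 * 1) = 1024 micro-batches per optimizer step.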
DISTRIBUTED_ARGS="
--nproc_per_node $GPUS_PER_NODE \
--nnodes $NNODES \
--node_rank $NODE_RANK \
--master_addr $MASTER_ADDR \
--master_port $MASTER_PORT
"
GPT_ARGS="
--tensor-model-parallel-size ${TP} \
--pipeline-model-parallel-size ${PP} \
--sequence-parallel \
--num-layers 80 \
--hidden-size 8192 \
--ffn-hidden-size 28672 \
--num-attention-heads 64 \
--tokenizer-type Llama2Tokenizer \
--tokenizer-model ${TOKENIZER_MODEL} \
--seq-length 4096 \
--max-position-embeddings 4096 \
--micro-batch-size 1 \
--global-batch-size 1024 \
--make-vocab-size-divisible-by 1 \
--lr 1.0e-6 \
--train-iters 5000 \
--lr-decay-style cosine \
--untie-embeddings-and-output-weights \
--attention-dropout 0.0 \
--init-method-std 0.01 \
--hidden-dropout 0.0 \
--position-embedding-type rope \
--normalization RMSNorm \
--use-fused-rmsnorm \
--swiglu \
--use-flash-attn \
--no-masked-softmax-fusion \
--attention-softmax-in-fp32 \
--min-lr 1.0e-7 \
--weight-decay 0.1 \
--clip-grad 1.0 \
--adam-beta1 0.9 \
--initial-loss-scale 4096.0 \
--adam-beta2 0.95 \
--adam-eps 1e-5 \
--no-gradient-accumulation-fusion \
--load ${CKPT_LOAD_DIR} \
--no-load-optim \
--no-load-rng \
--disable-bias-linear \
--group-query-attention \
--num-query-groups 8 \
--lr-warmup-fraction 0.01 \
--bf16 \
--use-fused-swiglu \
--use-fused-rotary-pos-emb \
--num-layers-per-virtual-pipeline-stage 5 \
--use-mc2 \
"
DATA_ARGS="
--data-path $DATA_PATH \
--split 949,50,1
"
OUTPUT_ARGS="
--log-interval 1 \
--save-interval 10000 \
--eval-interval 1000 \
--eval-iters 10 \
"
mkdir -p logs  # tee does not create the log directory on its own

torchrun $DISTRIBUTED_ARGS pretrain_gpt.py \
    $GPT_ARGS \
    $DATA_ARGS \
    $OUTPUT_ARGS \
    --distributed-backend nccl \
    --save $CKPT_SAVE_DIR \
    | tee logs/train_llama2_70b.log
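# Example 4-node launch, assuming the environment overrides above and a
# hypothetical rank-0 host name "host0" (repeat with NODE_RANK=2,3):
#   node 0: NODE_RANK=0 MASTER_ADDR=host0 bash pretrain_llama2_70b_ptd.sh
#   node 1: NODE_RANK=1 MASTER_ADDR=host0 bash pretrain_llama2_70b_ptd.sh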