!1759 Add Qwen2-7B/Qwen1.5-4B mcore full-parameter fine-tuning scripts

Merge pull request !1759 from 徐源徽/master
徐源徽 2024-10-26 06:55:21 +00:00 committed by i-robot
parent 45ca70b2aa
commit de4d0664b8
8 changed files with 489 additions and 0 deletions


@@ -0,0 +1,59 @@
#!/bin/bash
export CUDA_DEVICE_MAX_CONNECTIONS=1
# please fill in these path configurations
CHECKPOINT="your model ckpt path"
TOKENIZER_PATH="your tokenizer path"
# Change for multinode config
MASTER_ADDR=localhost
MASTER_PORT=6003
NNODES=1
NODE_RANK=0
NPUS_PER_NODE=8
WORLD_SIZE=$(($NPUS_PER_NODE*$NNODES))
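# Illustrative two-node example (values are placeholders, adjust to your
# cluster): run the same script on every node with NNODES=2,
# MASTER_ADDR=<IP of the rank-0 node>, NODE_RANK=0 on the first node and
# NODE_RANK=1 on the second.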
TP=1
PP=8
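# Optional sanity check (not in the original script): the model-parallel size
# TP*PP must evenly divide WORLD_SIZE; with TP=1 and PP=8 it exactly covers
# the 8 NPUs of a single node.
if [ $(( WORLD_SIZE % (TP * PP) )) -ne 0 ]; then
    echo "WORLD_SIZE=${WORLD_SIZE} is not divisible by TP*PP=$(( TP * PP ))" >&2
    exit 1
fi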
DISTRIBUTED_ARGS="
--nproc_per_node $NPUS_PER_NODE \
--nnodes $NNODES \
--node_rank $NODE_RANK \
--master_addr $MASTER_ADDR \
--master_port $MASTER_PORT
"
torchrun $DISTRIBUTED_ARGS inference.py \
--prompt-type qwen \
--use-mcore-models \
--tensor-model-parallel-size ${TP} \
--pipeline-model-parallel-size ${PP} \
--num-layers 40 \
--hidden-size 2560 \
--num-attention-heads 20 \
--ffn-hidden-size 6912 \
--max-position-embeddings 8192 \
--seq-length 8192 \
--make-vocab-size-divisible-by 1 \
--untie-embeddings-and-output-weights \
--micro-batch-size 1 \
--swiglu \
--disable-bias-linear \
--tokenizer-type PretrainedFromHF \
--tokenizer-name-or-path ${TOKENIZER_PATH} \
--load ${CHECKPOINT} \
--normalization RMSNorm \
--position-embedding-type rope \
--norm-epsilon 1e-6 \
--hidden-dropout 0 \
--attention-dropout 0 \
--tokenizer-not-use-fast \
--add-qkv-bias \
--max-new-tokens 256 \
--seed 1234 \
--bf16 \
--rotary-base 1000000.0 \
--padded-vocab-size 151936 \
| tee logs/generate_mcore_qwen15_4b_full.log


@@ -0,0 +1,14 @@
# Please modify the set_env.sh path according to your actual environment
source /usr/local/Ascend/ascend-toolkit/set_env.sh
mkdir -p ./finetune_dataset
python ./preprocess_data.py \
--input ./dataset/train-00000-of-00001-a09b74b3ef9c3b56.parquet \
--tokenizer-name-or-path ./model_from_hf/qwen15_hf/ \
--output-prefix ./finetune_dataset/alpaca \
--handler-name AlpacaStyleInstructionHandler \
--tokenizer-type PretrainedFromHF \
--workers 4 \
--log-interval 1000 \
--prompt-type qwen
# --map-keys '{"prompt":"instruction","query":"input","response":"output"}' # default value, can be omitted
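# The tokenized files are written under the --output-prefix above; the
# Qwen1.5-4B fine-tuning script's DATA_PATH should point at this
# ./finetune_dataset/alpaca prefix (the exact file suffixes are added by
# preprocess_data.py).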


@@ -0,0 +1,63 @@
#!/bin/bash
export CUDA_DEVICE_MAX_CONNECTIONS=1
# Change for multinode config
MASTER_ADDR=localhost
MASTER_PORT=6001
NNODES=1
NODE_RANK=0
NPUS_PER_NODE=8
WORLD_SIZE=$(($NPUS_PER_NODE*$NNODES))
# please fill in these path configurations
CHECKPOINT="your model ckpt path"
TOKENIZER_PATH="your tokenizer path"
DATA_PATH="./mmlu/test"
TASK="mmlu"
TP=1
PP=8
DISTRIBUTED_ARGS="
--nproc_per_node $NPUS_PER_NODE \
--nnodes $NNODES \
--node_rank $NODE_RANK \
--master_addr $MASTER_ADDR \
--master_port $MASTER_PORT
"
# Different tasks require different max_new_tokens values; please follow the instructions in the readme.
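# For multiple-choice benchmarks such as MMLU the expected answer is a single
# option letter, so --max-new-tokens 1 is enough; generative tasks generally
# need a larger value (see the readme for per-task recommendations).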
torchrun $DISTRIBUTED_ARGS evaluation.py \
--prompt-type qwen \
--use-mcore-models \
--task-data-path $DATA_PATH \
--task ${TASK} \
--seq-length 8192 \
--max-new-tokens 1 \
--max-position-embeddings 8192 \
--tensor-model-parallel-size ${TP} \
--pipeline-model-parallel-size ${PP} \
--num-layers 40 \
--hidden-size 2560 \
--ffn-hidden-size 6912 \
--num-attention-heads 20 \
--disable-bias-linear \
--swiglu \
--position-embedding-type rope \
--load $CHECKPOINT \
--normalization RMSNorm \
--tokenizer-type PretrainedFromHF \
--tokenizer-name-or-path ${TOKENIZER_PATH} \
--tokenizer-not-use-fast \
--bf16 \
--micro-batch-size 1 \
--exit-on-missing-checkpoint \
--no-load-rng \
--no-load-optim \
--untie-embeddings-and-output-weights \
--add-qkv-bias \
--make-vocab-size-divisible-by 1 \
--seed 1234 \
--rotary-base 1000000.0 \
--padded-vocab-size 151936 \
| tee logs/eval_mcore_qwen15_4b_full_${TASK}.log


@@ -0,0 +1,101 @@
#!/bin/bash
export CUDA_DEVICE_MAX_CONNECTIONS=1
NPUS_PER_NODE=8
MASTER_ADDR=localhost
MASTER_PORT=6005
NNODES=1
NODE_RANK=0
WORLD_SIZE=$(($NPUS_PER_NODE*$NNODES))
# please fill in these path configurations
CKPT_SAVE_DIR="your model save ckpt path"
DATA_PATH="your data path"
TOKENIZER_PATH="your tokenizer path"
CKPT_LOAD_DIR="your model ckpt path"
TP=1
PP=8
DISTRIBUTED_ARGS="
--nproc_per_node $NPUS_PER_NODE \
--nnodes $NNODES \
--node_rank $NODE_RANK \
--master_addr $MASTER_ADDR \
--master_port $MASTER_PORT
"
GPT_ARGS="
--finetune \
--is-instruction-dataset \
--variable-seq-lengths \
--tokenizer-not-use-fast \
--prompt-type qwen \
--use-mcore-models \
--tensor-model-parallel-size ${TP} \
--pipeline-model-parallel-size ${PP} \
--num-layers 40 \
--hidden-size 2560 \
--ffn-hidden-size 6912 \
--num-attention-heads 20 \
--tokenizer-type PretrainedFromHF \
--load ${CKPT_LOAD_DIR} \
--tokenizer-name-or-path ${TOKENIZER_PATH} \
--seq-length 8192 \
--max-position-embeddings 32768 \
--micro-batch-size 1 \
--global-batch-size 64 \
--make-vocab-size-divisible-by 1 \
--lr 1.25e-6 \
--min-lr 1.25e-7 \
--train-iters 2000 \
--lr-decay-style cosine \
--untie-embeddings-and-output-weights \
--disable-bias-linear \
--attention-dropout 0.0 \
--init-method-std 0.02 \
--hidden-dropout 0.0 \
--position-embedding-type rope \
--normalization RMSNorm \
--swiglu \
--use-flash-attn \
--use-fused-swiglu \
--use-fused-rmsnorm \
--use-fused-rotary-pos-emb \
--use-distributed-optimizer \
--use-rotary-position-embeddings \
--no-masked-softmax-fusion \
--attention-softmax-in-fp32 \
--weight-decay 0.0 \
--clip-grad 1.0 \
--adam-beta1 0.9 \
--adam-beta2 0.999 \
--rotary-base 1000000.0 \
--add-qkv-bias \
--initial-loss-scale 8192 \
--no-gradient-accumulation-fusion \
--lr-warmup-fraction 0.01 \
--no-load-optim \
--no-load-rng \
--padded-vocab-size 151936 \
--bf16 \
--seed 1234
"
DATA_ARGS="
--data-path $DATA_PATH \
--split 100,0,0
"
OUTPUT_ARGS="
--log-interval 1 \
--save-interval 2000 \
"
torchrun $DISTRIBUTED_ARGS pretrain_gpt.py \
$GPT_ARGS \
$DATA_ARGS \
$OUTPUT_ARGS \
--distributed-backend nccl \
--save ${CKPT_SAVE_DIR} \
| tee logs/tune_mcore_qwen15_4b_full_2000.log


@@ -0,0 +1,64 @@
#!/bin/bash
export CUDA_DEVICE_MAX_CONNECTIONS=1
# please fill in these path configurations
TOKENIZER_PATH="your tokenizer directory path"
CHECKPOINT="your model directory path"
# Change for multinode config
MASTER_ADDR=localhost
MASTER_PORT=6014
NNODES=1
NODE_RANK=0
NPUS_PER_NODE=8
WORLD_SIZE=$(($NPUS_PER_NODE*$NNODES))
TP=1
PP=8
SEQ_LENGTH=4096
DISTRIBUTED_ARGS="
--nproc_per_node $NPUS_PER_NODE \
--nnodes $NNODES \
--node_rank $NODE_RANK \
--master_addr $MASTER_ADDR \
--master_port $MASTER_PORT
"
torchrun $DISTRIBUTED_ARGS inference.py \
--prompt-type qwen \
--use-mcore-models \
--tensor-model-parallel-size ${TP} \
--pipeline-model-parallel-size ${PP} \
--load ${CHECKPOINT} \
--num-layers 28 \
--num-layer-list 4,4,4,4,3,3,3,3 \
--hidden-size 3584 \
--num-attention-heads 28 \
--ffn-hidden-size 18944 \
--max-position-embeddings ${SEQ_LENGTH} \
--seq-length ${SEQ_LENGTH} \
--make-vocab-size-divisible-by 1 \
--padded-vocab-size 152064 \
--rotary-base 1000000.0 \
--untie-embeddings-and-output-weights \
--micro-batch-size 1 \
--swiglu \
--disable-bias-linear \
--tokenizer-type PretrainedFromHF \
--tokenizer-name-or-path ${TOKENIZER_PATH} \
--normalization RMSNorm \
--position-embedding-type rope \
--norm-epsilon 1e-6 \
--hidden-dropout 0 \
--attention-dropout 0 \
--tokenizer-not-use-fast \
--add-qkv-bias \
--max-new-tokens 256 \
--no-gradient-accumulation-fusion \
--exit-on-missing-checkpoint \
--attention-softmax-in-fp32 \
--seed 1234 \
--group-query-attention \
--num-query-groups 4 \
| tee logs/generate_mcore_qwen2_7b_full.log


@@ -0,0 +1,14 @@
# Please modify the set_env.sh path according to your actual environment
source /usr/local/Ascend/ascend-toolkit/set_env.sh
mkdir -p ./finetune_dataset
python ./preprocess_data.py \
--input ./dataset/train-00000-of-00001-a09b74b3ef9c3b56.parquet \
--tokenizer-name-or-path ./model_from_hf/qwen2_hf \
--output-prefix ./finetune_dataset/alpaca \
--handler-name AlpacaStyleInstructionHandler \
--tokenizer-type PretrainedFromHF \
--workers 16 \
--log-interval 1000 \
--prompt-type qwen
# --map-keys '{"prompt":"instruction","query":"input","response":"output"}' # default value, can be omitted


@@ -0,0 +1,70 @@
#!/bin/bash
export CUDA_DEVICE_MAX_CONNECTIONS=1
TOKENIZER_PATH="your tokenizer directory path"
CHECKPOINT="your model directory path"
# configure task and data path
DATA_PATH="./mmlu/test/"
TASK="mmlu"
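# To evaluate a different benchmark, change TASK and point DATA_PATH at the
# directory holding that benchmark's test split (here, the MMLU test set).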
# distributed config
MASTER_ADDR=localhost
MASTER_PORT=6014
NNODES=1
NODE_RANK=0
NPUS_PER_NODE=8
WORLD_SIZE=$(($NPUS_PER_NODE*$NNODES))
TP=1
PP=8
SEQ_LENGTH=4096
DISTRIBUTED_ARGS="
--nproc_per_node $NPUS_PER_NODE \
--nnodes $NNODES \
--node_rank $NODE_RANK \
--master_addr $MASTER_ADDR \
--master_port $MASTER_PORT
"
# Different tasks require different max_new_tokens values; please follow the instructions in the readme.
torchrun $DISTRIBUTED_ARGS evaluation.py \
--prompt-type qwen \
--use-mcore-models \
--task-data-path $DATA_PATH \
--task ${TASK} \
--tensor-model-parallel-size ${TP} \
--pipeline-model-parallel-size ${PP} \
--seq-length ${SEQ_LENGTH} \
--max-position-embeddings ${SEQ_LENGTH} \
--max-new-tokens 1 \
--num-layers 28 \
--num-layer-list 4,4,4,4,3,3,3,3 \
--hidden-size 3584 \
--ffn-hidden-size 18944 \
--num-attention-heads 28 \
--disable-bias-linear \
--swiglu \
--position-embedding-type rope \
--load ${CHECKPOINT} \
--normalization RMSNorm \
--tokenizer-type PretrainedFromHF \
--tokenizer-name-or-path ${TOKENIZER_PATH} \
--tokenizer-not-use-fast \
--micro-batch-size 1 \
--exit-on-missing-checkpoint \
--no-load-rng \
--no-load-optim \
--untie-embeddings-and-output-weights \
--add-qkv-bias \
--make-vocab-size-divisible-by 1 \
--padded-vocab-size 152064 \
--rotary-base 1000000.0 \
--no-gradient-accumulation-fusion \
--attention-softmax-in-fp32 \
--seed 1234 \
--group-query-attention \
--num-query-groups 4 \
| tee logs/evaluation_mcore_qwen2_7b_full_${TASK}.log


@@ -0,0 +1,104 @@
#!/bin/bash
export HCCL_CONNECT_TIMEOUT=1200
export CUDA_DEVICE_MAX_CONNECTIONS=1
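# HCCL_CONNECT_TIMEOUT raises the collective-communication connect timeout so
# that slow rank start-up does not abort the job; CUDA_DEVICE_MAX_CONNECTIONS=1
# is commonly required by Megatron-style training for correct overlap of
# communication and computation.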
NPUS_PER_NODE=8
MASTER_ADDR=localhost
MASTER_PORT=6000
NNODES=1
NODE_RANK=0
WORLD_SIZE=$(($NPUS_PER_NODE*$NNODES))
# please fill in these path configurations
CKPT_LOAD_DIR="your model ckpt path"
CKPT_SAVE_DIR="your model save ckpt path"
DATA_PATH="your data path"
TOKENIZER_PATH="your tokenizer path"
TP=1
PP=8
SEQ_LEN=4096
DISTRIBUTED_ARGS="
--nproc_per_node $NPUS_PER_NODE \
--nnodes $NNODES \
--node_rank $NODE_RANK \
--master_addr $MASTER_ADDR \
--master_port $MASTER_PORT
"
GPT_ARGS="
--finetune \
--is-instruction-dataset \
--variable-seq-lengths \
--tokenizer-not-use-fast \
--prompt-type qwen \
--use-mcore-models \
--tensor-model-parallel-size ${TP} \
--pipeline-model-parallel-size ${PP} \
--num-layers 28 \
--num-layer-list 4,4,4,4,3,3,3,3 \
--hidden-size 3584 \
--ffn-hidden-size 18944 \
--num-attention-heads 28 \
--tokenizer-type PretrainedFromHF \
--tokenizer-name-or-path ${TOKENIZER_PATH} \
--seq-length ${SEQ_LEN} \
--max-position-embeddings ${SEQ_LEN} \
--micro-batch-size 1 \
--global-batch-size 256 \
--make-vocab-size-divisible-by 1 \
--padded-vocab-size 152064 \
--rotary-base 1000000.0 \
--lr 1.25e-6 \
--min-lr 1.25e-7 \
--train-iters 2000 \
--lr-decay-style cosine \
--untie-embeddings-and-output-weights \
--disable-bias-linear \
--attention-dropout 0.0 \
--init-method-std 0.02 \
--hidden-dropout 0.0 \
--position-embedding-type rope \
--normalization RMSNorm \
--swiglu \
--use-flash-attn \
--weight-decay 0.0 \
--use-rotary-position-embeddings \
--no-masked-softmax-fusion \
--attention-softmax-in-fp32 \
--clip-grad 1.0 \
--adam-beta1 0.9 \
--adam-beta2 0.999 \
--add-qkv-bias \
--initial-loss-scale 4096 \
--no-gradient-accumulation-fusion \
--no-load-optim \
--no-load-rng \
--seed 1234 \
--bf16 \
--group-query-attention \
--num-query-groups 4 \
--norm-epsilon 1e-06 \
"
DATA_ARGS="
--data-path $DATA_PATH \
--split 100,0,0
"
OUTPUT_ARGS="
--log-interval 1 \
--save-interval 2000 \
--eval-interval 2000 \
--eval-iters 0 \
"
torchrun $DISTRIBUTED_ARGS pretrain_gpt.py \
$GPT_ARGS \
$DATA_ARGS \
$OUTPUT_ARGS \
--distributed-backend nccl \
--load ${CKPT_LOAD_DIR} \
--save ${CKPT_SAVE_DIR} \
| tee logs/tune_mcore_qwen2_7b_full_2000.log