ModelLink2/examples/legacy/llama2/generate_llama2_7b_lora_ptd.sh

#!/bin/bash
# Note: the number of parameters is not aligned (this config does not exactly match the reference 7B parameter count)
export HCCL_CONNECT_TIMEOUT=1200       # HCCL connection timeout, in seconds
export COMBINED_ENABLE=1               # Ascend NPU combined-operator optimization switch
export CUDA_DEVICE_MAX_CONNECTIONS=1   # limit device work queues so kernels launch in order, as required by Megatron-style parallelism
# Please fill in these path configurations (a basic existence check follows below)
CHECKPOINT="your model directory path"
CHECKPOINT_LORA="your lora model directory path"
TOKENIZER_PATH="your tokenizer directory path"
TOKENIZER_MODEL="your tokenizer.model file path"
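# Optional sanity check (added sketch, not part of the original script): fail fast
# if the placeholder paths above were not replaced with real locations.
for p in "$CHECKPOINT" "$CHECKPOINT_LORA" "$TOKENIZER_PATH"; do
    if [ ! -d "$p" ]; then
        echo "Error: '$p' is not an existing directory; edit the path variables above." >&2
        exit 1
    fi
done
if [ ! -f "$TOKENIZER_MODEL" ]; then
    echo "Error: '$TOKENIZER_MODEL' is not an existing file; edit TOKENIZER_MODEL above." >&2
    exit 1
fi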
# Change these for a multi-node configuration (see the example after DISTRIBUTED_ARGS below)
MASTER_ADDR=localhost
MASTER_PORT=6001
NNODES=1
NODE_RANK=0
NPUS_PER_NODE=8
WORLD_SIZE=$(($NPUS_PER_NODE*$NNODES))
DISTRIBUTED_ARGS="--nproc_per_node $NPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT"
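# Example multi-node settings (illustrative values only, not from the original script):
# on each of two nodes, point MASTER_ADDR at node 0 and set that node's own rank, e.g.
#   MASTER_ADDR=192.168.1.10  NNODES=2  NODE_RANK=0   # node 0
#   MASTER_ADDR=192.168.1.10  NNODES=2  NODE_RANK=1   # node 1

# Added safeguard: make sure the directory used by 'tee' at the end of the script exists.
mkdir -p logs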
torchrun $DISTRIBUTED_ARGS inference.py \
--tensor-model-parallel-size 1 \
--pipeline-model-parallel-size 1 \
--num-layers 32 \
--hidden-size 4096 \
--ffn-hidden-size 11008 \
--position-embedding-type rope \
--seq-length 4096 \
--max-new-tokens 256 \
--micro-batch-size 4 \
--global-batch-size 16 \
--num-attention-heads 32 \
--max-position-embeddings 4096 \
--swiglu \
--load "${CHECKPOINT}" \
--tokenizer-type PretrainedFromHF \
--tokenizer-name-or-path "${TOKENIZER_PATH}" \
--tokenizer-model "${TOKENIZER_MODEL}" \
--tokenizer-not-use-fast \
--bf16 \
--normalization RMSNorm \
--untie-embeddings-and-output-weights \
--disable-bias-linear \
--attention-softmax-in-fp32 \
--no-load-optim \
--no-load-rng \
--no-masked-softmax-fusion \
--no-gradient-accumulation-fusion \
--exit-on-missing-checkpoint \
--lora-load "${CHECKPOINT_LORA}" \
--lora-r 8 \
--lora-alpha 16 \
--lora-fusion \
--lora-target-modules query_key_value dense dense_h_to_4h dense_4h_to_h \
--make-vocab-size-divisible-by 1 \
--prompt-type 'alpaca' \
| tee logs/generate_llama2_7b_lora.log
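
# Notes on the LoRA flags above (descriptive comments based on common LoRA/Megatron
# conventions; treat the semantics as assumptions, not verified against this repo):
#   --lora-r 8 / --lora-alpha 16   adapter rank and scaling factor used during fine-tuning;
#                                  they must match the values the adapter was trained with
#   --lora-target-modules ...      the Megatron linear layers the adapters were attached to
#                                  (fused attention QKV, attention output, MLP up/down projections)
#   --lora-fusion                  enables a fused LoRA computation path for faster inference
# Typical usage after editing the path variables at the top (example invocation):
#   bash examples/legacy/llama2/generate_llama2_7b_lora_ptd.sh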