#!/bin/bash -l
# SLURM SUBMIT SCRIPT
#SBATCH --nodelist=node-gpu01
#SBATCH --gres=gpu:4 # Request 4 GPUs on the node (the launch commands below run 4 processes on GPUs 4-7)
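
# This script submits three OfflineArcher training runs back to back
# (RSAGame, WordTaboo, StrategicDialogue), each fine-tuning a Qwen3-14B
# checkpoint via accelerate + DeepSpeed ZeRO-3 and writing its output to a
# separate log file.

# ==== Runtime environment: temp dir, CUDA allocator, NCCL, and debugging flags ====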
export TMPDIR=$HOME/tmp
export PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:64,expandable_segments:True
export NCCL_P2P_DISABLE=1
export CUDA_LAUNCH_BLOCKING=1
export CUDA_DEVICE_MAX_CONNECTIONS=1
export NCCL_NSOCKS_PERTHREAD=4
export NCCL_SOCKET_NTHREADS=2
export PYTORCH_NO_CUDA_MEMORY_CACHING=1 # disable the CUDA caching allocator
export MAX_JOBS=4 # limit parallel loading jobs
export ACCELERATE_USE_DEEPSPEED="true"
export TORCH_USE_CUDA_DSA=1
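
# ==== Shared hyperparameters for all three runs ====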
actor_lr=1e-5
critic_lr=1e-5
critic_expectile=0.9
inv_temp=1.0
batch_size=2
accumulate_grad_batches=16 # note: currently unused; the launch commands below pass --model.accumulate_grad_batches=4 directly
echo "当前conda环境: $(which python)"
echo "python版本: $(python --version)"
echo "accelerate版本: $(accelerate --version)"
echo "CUDA_VISIBLE_DEVICES: $CUDA_VISIBLE_DEVICES"
echo "nvidia-smi:"
nvidia-smi
echo "==== 测试python能否运行 ===="
python -c "print('Python可以运行')"
echo "==== 测试main.py是否存在 ===="
ls -l "$HOME/codes/OfflineArcher/main.py"
echo "==== 测试accelerate能否import torch ===="
accelerate launch --help || echo "accelerate launch命令无法运行"
echo "==== 开始正式训练 ===="
set -x # 打印每一行命令,方便debug
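# ==== Job 1/3: OfflineArcher on the RSAGame data module (log: Qwen3-14B_RSA_log.txt) ====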
accelerate launch \
--config_file "$HOME/codes/OfflineArcher/deepspeed_zero3.yaml" \
--num_processes 4 \
--gpu_ids 4,5,6,7 \
--main_process_port 29500 \
"$HOME/codes/OfflineArcher/main.py" \
fit \
--data=RSAGame \
--data.batch_size=$batch_size \
--data.base_model="Qwen3-14B" \
--data.n_traj_eval=4 \
--model=OfflineArcher \
--model.optimize_critic=True \
--model.actor_lr=$actor_lr \
--model.critic_lr=$critic_lr \
--model.discount_factor=0.99 \
--model.tau=0.05 \
--model.critic_expectile=$critic_expectile \
--model.inv_temp=$inv_temp \
--model.accumulate_grad_batches=4 \
--model.model_name_or_path="/home/jiashuo/codes/ForesightOptim/checkpoints/im_Qwen3-14B_word/merged_model" \
--trainer.fast_dev_run=False \
--trainer.max_epochs=1 \
--trainer.logger=WandbLogger \
--trainer.logger.init_args.project="WordTaboo-Official" \
--trainer.default_root_dir="checkpoints/archer_Qwen3-14B_word" \
--trainer.logger.init_args.name="Test-AC-critic_expectile_$critic_expectile-inv_temp_$inv_temp" \
--trainer.strategy=deepspeed_stage_3 \
--trainer.devices=4 \
--trainer.accelerator=gpu \
--trainer.precision=bf16 \
--trainer.enable_model_summary=false \
--trainer.val_check_interval=null \
--trainer.limit_val_batches=0 > Qwen3-14B_RSA_log.txt 2>&1
echo "第三个任务返回码: $?"
tail -20 Qwen3-14B_RSA_log.txt
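
# ==== Job 2/3: OfflineArcher on the WordTaboo data module (log: Qwen3-14B_Word_log.txt) ====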
accelerate launch \
--config_file "$HOME/codes/OfflineArcher/deepspeed_zero3.yaml" \
--num_processes 4 \
--gpu_ids 4,5,6,7 \
--main_process_port 29500 \
"$HOME/codes/OfflineArcher/main.py" \
fit \
--data=WordTaboo \
--data.batch_size=$batch_size \
--data.base_model="Qwen3-14B" \
--data.n_traj_eval=4 \
--model=OfflineArcher \
--model.optimize_critic=True \
--model.actor_lr=$actor_lr \
--model.critic_lr=$critic_lr \
--model.discount_factor=0.99 \
--model.tau=0.05 \
--model.critic_expectile=$critic_expectile \
--model.inv_temp=$inv_temp \
--model.accumulate_grad_batches=4 \
--model.model_name_or_path="/home/jiashuo/codes/ForesightOptim/checkpoints/im_Qwen3-14B_word/merged_model" \
--trainer.fast_dev_run=False \
--trainer.max_epochs=1 \
--trainer.logger=WandbLogger \
--trainer.logger.init_args.project="WordTaboo-Official" \
--trainer.default_root_dir="checkpoints/archer_Qwen3-14B_word" \
--trainer.logger.init_args.name="Test-AC-critic_expectile_$critic_expectile-inv_temp_$inv_temp" \
--trainer.strategy=deepspeed_stage_3 \
--trainer.devices=4 \
--trainer.accelerator=gpu \
--trainer.precision=bf16 \
--trainer.enable_model_summary=false \
--trainer.val_check_interval=null \
--trainer.limit_val_batches=0 > Qwen3-14B_Word_log.txt 2>&1
echo "第三个任务返回码: $?"
tail -20 Qwen3-14B_Word_log.txt
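
# ==== Job 3/3: OfflineArcher on the StrategicDialogue data module (log: Qwen3-14B_Strategic_log.txt) ====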
accelerate launch \
--config_file "$HOME/codes/OfflineArcher/deepspeed_zero3.yaml" \
--num_processes 4 \
--gpu_ids 4,5,6,7 \
--main_process_port 29500 \
"$HOME/codes/OfflineArcher/main.py" \
fit \
--data=StrategicDialogue \
--data.batch_size=$batch_size \
--data.base_model="Qwen3-14B" \
--data.n_traj_eval=4 \
--model=OfflineArcher \
--model.optimize_critic=True \
--model.actor_lr=$actor_lr \
--model.critic_lr=$critic_lr \
--model.discount_factor=0.99 \
--model.tau=0.05 \
--model.critic_expectile=$critic_expectile \
--model.inv_temp=$inv_temp \
--model.accumulate_grad_batches=4 \
--model.model_name_or_path="/home/jiashuo/codes/ForesightOptim/checkpoints/im_Qwen3-14B_strategic/merged_model" \
--trainer.fast_dev_run=False \
--trainer.max_epochs=1 \
--trainer.logger=WandbLogger \
--trainer.logger.init_args.project="Strategic-Official" \
--trainer.default_root_dir="checkpoints/archer_Qwen3-14B_strategic" \
--trainer.logger.init_args.name="Test-AC-critic_expectile_$critic_expectile-inv_temp_$inv_temp" \
--trainer.strategy=deepspeed_stage_3 \
--trainer.devices=4 \
--trainer.accelerator=gpu \
--trainer.precision=bf16 \
--trainer.enable_model_summary=false \
--trainer.val_check_interval=null \
--trainer.limit_val_batches=0 > Qwen3-14B_Strategic_log.txt 2>&1
echo "第五个任务返回码: $?"
tail -20 Qwen3-14B_Strategic_log.txt
set +x
echo "所有任务执行完毕。请检查上面各个log文件的最后20行和返回码。"