# Model arguments
model_name_or_path: Qwen/Qwen2.5-1.5B-Instruct
model_revision: main
torch_dtype: bfloat16
attn_implementation: flash_attention_2

# Data training arguments
dataset_name: AI-MO/NuminaMath-TIR
dataset_configs:
- all
# num_processes is one fewer than the GPU count because vLLM reserves 1 GPU for generation
num_processes: 7

# GRPO trainer config
bf16: true
use_vllm: true
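# Generation runs on a dedicated vLLM instance: vllm_device: auto lets TRL pick a free GPU
# (typically the one left over by num_processes: 7 on an 8-GPU node), and
# vllm_gpu_memory_utilization caps the share of that GPU's memory vLLM pre-allocates
# for weights and KV cache.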
vllm_device: auto
vllm_gpu_memory_utilization: 0.7
do_eval: true
eval_strategy: steps
eval_steps: 100
gradient_accumulation_steps: 16
gradient_checkpointing: true
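# use_reentrant: false opts into PyTorch's non-reentrant activation checkpointing,
# the implementation PyTorch recommends over the legacy reentrant variant.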
gradient_checkpointing_kwargs:
  use_reentrant: false
hub_model_id: Qwen2.5-1.5B-Open-R1-GRPO
hub_strategy: every_save
learning_rate: 2.0e-05
log_level: info
logging_steps: 5
logging_strategy: steps
lr_scheduler_type: cosine
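# Prompts are truncated to max_prompt_length tokens before generation, and each sampled
# completion is capped at max_completion_length tokens.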
max_prompt_length: 512
max_completion_length: 1024
max_steps: -1
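# num_generations below is the number of completions sampled per prompt; the global
# generation batch (per_device_train_batch_size 16 x num_processes 7 = 112 completions)
# divides evenly by it, as the GRPO trainer expects.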
num_generations: 7
num_train_epochs: 1
output_dir: data/Qwen2.5-1.5B-Open-R1-GRPO
overwrite_output_dir: true
per_device_eval_batch_size: 32
per_device_train_batch_size: 16
push_to_hub: true
report_to:
- wandb
save_strategy: "no"
seed: 42
warmup_ratio: 0.1
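
# Example launch (a sketch: the accelerate config and script paths are assumptions based
# on a typical Open R1 checkout; adjust to your layout):
#   ACCELERATE_LOG_LEVEL=info accelerate launch \
#     --config_file recipes/accelerate_configs/zero2.yaml --num_processes=7 \
#     src/open_r1/grpo.py --config <path-to-this-yaml>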