diff --git a/3.test_cases/pytorch/verl/.gitignore b/3.test_cases/pytorch/verl/.gitignore new file mode 100644 index 000000000..738a410dc --- /dev/null +++ b/3.test_cases/pytorch/verl/.gitignore @@ -0,0 +1 @@ +kustomization.yaml \ No newline at end of file diff --git a/3.test_cases/pytorch/verl/rlvr/Dockerfile b/3.test_cases/pytorch/verl/rlvr/Dockerfile index 849b9b9df..ab13cd66a 100644 --- a/3.test_cases/pytorch/verl/rlvr/Dockerfile +++ b/3.test_cases/pytorch/verl/rlvr/Dockerfile @@ -1,7 +1,8 @@ # Dockerfile for VERL with EFA support # Using hiyouga/verl base image and adding EFA capabilities -FROM hiyouga/verl:ngc-th2.6.0-cu126-vllm0.8.4-flashinfer0.2.2-cxx11abi0 - +ARG TAG=vllm011.latest +FROM verlai/verl:${TAG} +ARG MEGATRON_LM_VERSION=core_v0.13.1 # EFA configuration ARG OPEN_MPI_PATH=/opt/amazon/openmpi/ ENV EFA_VERSION=1.43.3 @@ -115,10 +116,21 @@ RUN git clone https://github.com/volcengine/verl.git && \ cd verl && \ git checkout v0.6.1 WORKDIR /workspace/verl - # Install VERL in development mode RUN pip install -e . +##################### +# Install megatron-lm +##################### +RUN pip install -U setuptools +RUN cd /workspace && git clone --depth 1 --branch ${MEGATRON_LM_VERSION} https://github.com/NVIDIA/Megatron-LM.git \ + && cd Megatron-LM \ + && pip install nltk \ + && pip install . 
+# Check if separate Apex is needed +# https://verl.readthedocs.io/en/latest/start/install.html#install-from-docker-image + + # Set working directory WORKDIR /workspace diff --git a/3.test_cases/pytorch/verl/rlvr/README.md b/3.test_cases/pytorch/verl/rlvr/README.md index eab7a90a0..7e55e88ef 100644 --- a/3.test_cases/pytorch/verl/rlvr/README.md +++ b/3.test_cases/pytorch/verl/rlvr/README.md @@ -70,9 +70,15 @@ Build a Docker image with verl, EFA networking support, and push to ECR: ./setup/build-push.sh ``` -Deploy the Ray cluster with head and worker pods configured for distributed training: +Generate kustomization.yaml from your environment variables and deploy the Ray cluster: ```bash -envsubst < setup/raycluster.yaml | kubectl apply -f - +./setup/generate-kustomization.sh +kubectl apply -k setup/ +``` + +Alternatively, you can combine both steps: +```bash +./setup/generate-kustomization.sh && kubectl apply -k setup/ ``` > **Note**: Considerations before applying raycluster.yaml diff --git a/3.test_cases/pytorch/verl/rlvr/recipe/run_qwen3-235b_megatron_96gb.sh b/3.test_cases/pytorch/verl/rlvr/recipe/run_qwen3-235b_megatron_96gb.sh new file mode 100644 index 000000000..2df5f7b43 --- /dev/null +++ b/3.test_cases/pytorch/verl/rlvr/recipe/run_qwen3-235b_megatron_96gb.sh @@ -0,0 +1,182 @@ +#!/usr/bin/env bash +set -xeuo pipefail + +## !!!!!!!important!!!!!! 
+## set the following environment variables on all your nodes +# env_vars: +# CUDA_DEVICE_MAX_CONNECTIONS: "1" +# NCCL_NVLS_ENABLE: "0" +# VLLM_USE_V1: 1 +# install mbridge=0.1.13 on all your node with the following command: +# pip3 install git+https://github.com/ISEEKYAN/mbridge + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +[ -f "${SCRIPT_DIR}/env.sh" ] && source "${SCRIPT_DIR}/env.sh" + +adv_estimator=grpo + +use_kl_in_reward=False +kl_coef=0.0 +use_kl_loss=True +kl_loss_coef=0.001 + +clip_ratio_low=0.2 +clip_ratio_high=0.28 + +max_prompt_length=$((1024 * 2)) +max_response_length=$((1024 * 8)) +enable_overlong_buffer=True +overlong_buffer_len=$((1024 * 1)) +overlong_penalty_factor=1.0 + +loss_agg_mode="token-mean" + +train_prompt_bsz=${TRAIN_BS:-32} +n_resp_per_prompt=8 +train_prompt_mini_bsz=16 + +# minimum nodes need for qwen3-235B-A22B +NNODES=${NNODES:-4} +# Paths + +RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} + +MODEL_PATH=$RAY_DATA_HOME/models/Qwen3-235B-A22B +MODEL_PATH="${MODEL_PATH:-Qwen/Qwen3-VL-235B-A22B-Instruct}" + +TRAIN_FILE=/fsx/verl/data/geo3k/train.parquet +TEST_FILE=/fsx/verl/data/geo3k/test.parquet + +# Algorithm +temperature=1.0 +top_p=1.0 +top_k=-1 # 0 for HF rollout, -1 for vLLM rollout +val_top_p=0.7 +# Performance Related Parameter +use_dynamic_bsz=True +actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 10 / 10)) +infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 1)) +offload=True +OPTIM_OFFLOAD=${OPTIM_OFFLOAD:-True} +gen_tp=8 +train_tp=${TP:-4} +train_pp=${PP:-8} + +EP=${EP:-4} +ETP=1 +CP=1 +optimizer_offload_fraction=${OFFLOAD_FRACTION:-1.} +last_layer=${LAST_LAYER:-10} + +project_name='verl-qwen3' +exp_name="235B-${NNODES}-pp${train_pp}-tp${train_tp}-ep${EP}-actor-length${actor_ppo_max_token_len}" +CKPTS_DIR=$RAY_DATA_HOME/ckpt/${project_name}/${exp_name} + +# TODO: support cuda graph for rollout by setting the following config + # 
actor_rollout_ref.rollout.cudagraph_capture_sizes=[1,2,4,8,16,32] + # actor_rollout_ref.rollout.enforce_eager=False + +python3 -m verl.trainer.main_ppo \ + --config-path=config \ + --config-name='ppo_megatron_trainer.yaml' \ + data.train_files="${TRAIN_FILE}" \ + data.val_files="${TEST_FILE}" \ + data.prompt_key=prompt \ + data.truncation='left' \ + data.max_prompt_length=${max_prompt_length} \ + data.max_response_length=${max_response_length} \ + data.train_batch_size=${train_prompt_bsz} \ + actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ + actor_rollout_ref.rollout.name=vllm \ + actor_rollout_ref.rollout.enforce_eager=True \ + actor_rollout_ref.rollout.free_cache_engine=True \ + algorithm.adv_estimator=${adv_estimator} \ + algorithm.use_kl_in_reward=${use_kl_in_reward} \ + algorithm.kl_ctrl.kl_coef=${kl_coef} \ + actor_rollout_ref.model.use_fused_kernels=True \ + actor_rollout_ref.actor.megatron.use_mbridge=True \ + actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ + actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ + actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ + actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ + actor_rollout_ref.actor.clip_ratio_c=10.0 \ + actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=2 \ + actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \ + actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ + actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ + actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ + actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ + actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ + actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ + actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ + actor_rollout_ref.model.path="${MODEL_PATH}" \ + actor_rollout_ref.actor.optim.lr=1e-6 \ + 
actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ + actor_rollout_ref.actor.optim.weight_decay=0.1 \ + +actor_rollout_ref.actor.optim.override_optimizer_config.optimizer_offload_fraction=${optimizer_offload_fraction} \ + +actor_rollout_ref.actor.optim.override_optimizer_config.overlap_cpu_optimizer_d2h_h2d=True \ + +actor_rollout_ref.actor.optim.override_optimizer_config.use_precision_aware_optimizer=True \ + +actor_rollout_ref.actor.optim.override_optimizer_config.optimizer_cpu_offload=True \ + actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ + actor_rollout_ref.actor.megatron.param_offload=${offload} \ + actor_rollout_ref.actor.megatron.optimizer_offload=${OPTIM_OFFLOAD} \ + actor_rollout_ref.actor.megatron.grad_offload=${offload} \ + actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=${train_pp} \ + actor_rollout_ref.actor.megatron.tensor_model_parallel_size=${train_tp} \ + actor_rollout_ref.actor.megatron.expert_model_parallel_size=$EP \ + actor_rollout_ref.actor.megatron.expert_tensor_parallel_size=$ETP \ + actor_rollout_ref.actor.megatron.context_parallel_size=${CP} \ + actor_rollout_ref.actor.entropy_coeff=0 \ + actor_rollout_ref.actor.optim.clip_grad=1.0 \ + actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ + actor_rollout_ref.rollout.gpu_memory_utilization=0.85 \ + actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ + actor_rollout_ref.rollout.enable_chunked_prefill=True \ + actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ + actor_rollout_ref.rollout.temperature=${temperature} \ + actor_rollout_ref.rollout.top_p=${top_p} \ + actor_rollout_ref.rollout.top_k=${top_k} \ + actor_rollout_ref.nccl_timeout=1200 \ + actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ + actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ + actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ + actor_rollout_ref.rollout.val_kwargs.do_sample=True \ + 
actor_rollout_ref.rollout.val_kwargs.n=1 \ + actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=${train_pp} \ + actor_rollout_ref.ref.megatron.tensor_model_parallel_size=${train_tp} \ + actor_rollout_ref.ref.megatron.expert_model_parallel_size=$EP \ + actor_rollout_ref.ref.megatron.expert_tensor_parallel_size=$ETP \ + actor_rollout_ref.ref.megatron.context_parallel_size=${CP} \ + actor_rollout_ref.ref.megatron.param_offload=${offload} \ + +actor_rollout_ref.actor.megatron.override_transformer_config.apply_rope_fusion=True \ + +actor_rollout_ref.actor.megatron.override_transformer_config.masked_softmax_fusion=True \ + +actor_rollout_ref.actor.megatron.override_transformer_config.bias_activation_fusion=True \ + +actor_rollout_ref.actor.megatron.override_transformer_config.bias_dropout_fusion=True \ + +actor_rollout_ref.actor.megatron.override_transformer_config.gradient_accumulation_fusion=True \ + +actor_rollout_ref.actor.megatron.override_transformer_config.deallocate_pipeline_outputs=True \ + +actor_rollout_ref.actor.megatron.override_transformer_config.persist_layer_norm=True \ + +actor_rollout_ref.actor.megatron.override_transformer_config.moe_grouped_gemm=True \ + +actor_rollout_ref.actor.megatron.override_transformer_config.moe_permute_fusion=True \ + +actor_rollout_ref.actor.megatron.override_transformer_config.moe_token_dispatcher_type="flex" \ + +actor_rollout_ref.actor.megatron.override_transformer_config.moe_router_dtype=fp32 \ + +actor_rollout_ref.actor.megatron.override_transformer_config.moe_enable_deepep=True \ + +actor_rollout_ref.actor.megatron.override_transformer_config.account_for_loss_in_pipeline_split=True \ + +actor_rollout_ref.actor.megatron.override_transformer_config.account_for_embedding_in_pipeline_split=True \ + reward_model.reward_manager=dapo \ + +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ + +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ + 
+reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ + +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \ + +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ + trainer.logger=['console','wandb'] \ + trainer.project_name="${project_name}" \ + trainer.experiment_name="${exp_name}" \ + trainer.n_gpus_per_node=8 \ + trainer.nnodes="${NNODES}" \ + trainer.val_before_train=False \ + trainer.test_freq=10 \ + trainer.save_freq=100 \ + trainer.total_epochs=10 \ + trainer.default_local_dir="${CKPTS_DIR}" \ + trainer.resume_mode=auto \ + trainer.log_val_generations=10 diff --git a/3.test_cases/pytorch/verl/rlvr/recipe/run_qwen3_vl-235b-megatron.sh b/3.test_cases/pytorch/verl/rlvr/recipe/run_qwen3_vl-235b-megatron.sh new file mode 100644 index 000000000..4f9e36658 --- /dev/null +++ b/3.test_cases/pytorch/verl/rlvr/recipe/run_qwen3_vl-235b-megatron.sh @@ -0,0 +1,86 @@ +# Adopted from https://github.com/volcengine/verl/blob/249c0831b793751f488d80bcfeafb8f7c544cd65/examples/grpo_trainer/run_qwen3_vl-235b-megatron.sh +set -x +ENGINE=${1:-vllm} +export CUDA_DEVICE_MAX_CONNECTIONS=1 # For megatron communication/computation overlapping + +export VLLM_ALLREDUCE_USE_SYMM_MEM=0 # for vllm0.11.0 with TP + + +HF_MODEL_PATH=${HF_MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen3-VL-235B-A22B-Instruct"} + +GEN_TP=${GEN_TP:-16} +CP=${CP:-2} +TP=${TP:-1} +PP=${PP:-8} +EP=${EP:-8} +ETP=${ETP:-1} + +# Use env-provided data paths or default to Geometry3k under RAY_DATA_HOME +train_path=${TRAIN_FILE:-${RAY_DATA_HOME}/data/geo3k/train.parquet} +test_path=${TEST_FILE:-${RAY_DATA_HOME}/data/geo3k/test.parquet} + +python3 -m verl.trainer.main_ppo --config-path=config \ + --config-name='ppo_megatron_trainer.yaml'\ + algorithm.adv_estimator=grpo \ + data.train_files="$train_path" \ + data.val_files="$test_path" \ + data.train_batch_size=512 \ + data.max_prompt_length=1024 \ + data.max_response_length=2048 \ + data.filter_overlong_prompts=True \ + 
data.truncation='error' \ + actor_rollout_ref.model.path=$HF_MODEL_PATH \ + actor_rollout_ref.actor.optim.lr=1e-6 \ + actor_rollout_ref.actor.ppo_mini_batch_size=128 \ + actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=1 \ + actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=$PP \ + actor_rollout_ref.actor.megatron.tensor_model_parallel_size=$TP \ + actor_rollout_ref.actor.megatron.context_parallel_size=$CP \ + actor_rollout_ref.actor.megatron.expert_model_parallel_size=$EP \ + actor_rollout_ref.actor.megatron.expert_tensor_parallel_size=$ETP \ + actor_rollout_ref.actor.use_kl_loss=True \ + actor_rollout_ref.actor.kl_loss_coef=0.01 \ + actor_rollout_ref.actor.kl_loss_type=low_var_kl \ + actor_rollout_ref.actor.entropy_coeff=0 \ + actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=1 \ + actor_rollout_ref.rollout.tensor_model_parallel_size=$GEN_TP \ + actor_rollout_ref.actor.use_dynamic_bsz=True \ + actor_rollout_ref.actor.ppo_max_token_len_per_gpu=4096 \ + actor_rollout_ref.ref.log_prob_use_dynamic_bsz=True \ + actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=4096 \ + actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=True \ + actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=4096 \ + actor_rollout_ref.rollout.name=$ENGINE \ + +actor_rollout_ref.rollout.engine_kwargs.vllm.disable_mm_preprocessor_cache=True \ + actor_rollout_ref.rollout.gpu_memory_utilization=0.7 \ + actor_rollout_ref.rollout.n=5 \ + actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=1 \ + actor_rollout_ref.actor.megatron.use_mbridge=True \ + actor_rollout_ref.actor.megatron.param_offload=True \ + actor_rollout_ref.actor.megatron.optimizer_offload=True \ + actor_rollout_ref.actor.megatron.grad_offload=True \ + actor_rollout_ref.ref.megatron.param_offload=True \ + +actor_rollout_ref.actor.optim.override_optimizer_config.optimizer_offload_fraction=1 \ + +actor_rollout_ref.actor.optim.override_optimizer_config.overlap_cpu_optimizer_d2h_h2d=True \ + 
+actor_rollout_ref.actor.optim.override_optimizer_config.use_precision_aware_optimizer=True \ + +actor_rollout_ref.actor.optim.override_optimizer_config.optimizer_cpu_offload=True \ + +actor_rollout_ref.actor.megatron.override_transformer_config.moe_router_dtype=fp32 \ + +actor_rollout_ref.actor.megatron.override_transformer_config.moe_enable_deepep=True \ + +actor_rollout_ref.actor.megatron.override_transformer_config.moe_token_dispatcher_type=flex \ + +actor_rollout_ref.actor.megatron.override_transformer_config.recompute_method=uniform \ + +actor_rollout_ref.actor.megatron.override_transformer_config.recompute_granularity=full \ + +actor_rollout_ref.actor.megatron.override_transformer_config.recompute_num_layers=1 \ + +actor_rollout_ref.actor.megatron.override_transformer_config.gradient_accumulation_fusion=True \ + +actor_rollout_ref.actor.megatron.override_transformer_config.moe_permute_fusion=True \ + +actor_rollout_ref.actor.megatron.override_transformer_config.account_for_loss_in_pipeline_split=True \ + +actor_rollout_ref.actor.megatron.override_transformer_config.account_for_embedding_in_pipeline_split=True \ + algorithm.use_kl_in_reward=False \ + trainer.critic_warmup=0 \ + trainer.logger='["console","wandb"]' \ + trainer.project_name='verl_grpo_example_geo3k' \ + trainer.experiment_name='qwen3_vl_235b_megatron' \ + trainer.n_gpus_per_node=8 \ + trainer.nnodes=4 \ + trainer.save_freq=20 \ + trainer.test_freq=5 \ + trainer.total_epochs=15 $@ \ No newline at end of file diff --git a/3.test_cases/pytorch/verl/rlvr/setup/download-model-job.sh b/3.test_cases/pytorch/verl/rlvr/setup/download-model-job.sh new file mode 100755 index 000000000..0d061eeae --- /dev/null +++ b/3.test_cases/pytorch/verl/rlvr/setup/download-model-job.sh @@ -0,0 +1,92 @@ +#!/bin/bash +set -euo pipefail + +# Create a Kubernetes Job that downloads a HF model into HF_MODEL_PATH. 
+# Defaults: +# HF_MODEL_REPO="Qwen/Qwen3-VL-235B-A22B-Instruct" +# HF_MODEL_PATH="${RAY_DATA_HOME:-/fsx/verl}/models/${MODEL_LOCAL_NAME:-Qwen3-VL-235B-A22B-Instruct}" +# Requirements: +# - HF_TOKEN set in setup/env_vars +# - fsx-claim PVC available and mounted at /fsx +# - Image must have python + huggingface_hub (present in the training image) + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +ENV_VARS_FILE="${SCRIPT_DIR}/env_vars" + +if [ ! -f "${ENV_VARS_FILE}" ]; then + echo "Missing ${ENV_VARS_FILE}. Copy env_vars.example and set your values." + exit 1 +fi + +source "${ENV_VARS_FILE}" + +if [ -z "${HF_TOKEN:-}" ]; then + echo "HF_TOKEN must be set in ${ENV_VARS_FILE}." + exit 1 +fi + +IMAGE="${REGISTRY}${IMAGE}:${TAG}" +JOB_NAME="download-qwen3-vl-235b" +HF_MODEL_REPO="${HF_MODEL_REPO:-Qwen/Qwen3-VL-235B-A22B-Instruct}" +MODEL_LOCAL_NAME="${MODEL_LOCAL_NAME:-Qwen3-VL-235B-A22B-Instruct}" +HF_MODEL_PATH="${HF_MODEL_PATH:-${RAY_DATA_HOME:-/fsx/verl}/models/${MODEL_LOCAL_NAME}}" + +cat < diff --git a/3.test_cases/pytorch/verl/rlvr/setup/generate-kustomization.sh b/3.test_cases/pytorch/verl/rlvr/setup/generate-kustomization.sh new file mode 100755 index 000000000..57d2cd1f6 --- /dev/null +++ b/3.test_cases/pytorch/verl/rlvr/setup/generate-kustomization.sh @@ -0,0 +1,108 @@ +#!/bin/bash +set -e + +# Script to generate kustomization.yaml from env_vars file +# This replaces the envsubst workflow with kustomize-based configuration + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +ENV_VARS_FILE="${SCRIPT_DIR}/env_vars" +KUSTOMIZATION_FILE="${SCRIPT_DIR}/kustomization.yaml" + +# Check if env_vars file exists +if [ ! -f "${ENV_VARS_FILE}" ]; then + echo "Error: ${ENV_VARS_FILE} not found." + echo "Please copy setup/env_vars.example to setup/env_vars and configure it." 
+ exit 1 +fi + +# Source the env_vars file to get all variables +source "${ENV_VARS_FILE}" + +# Ensure required variables are set +if [ -z "${REGISTRY}" ] || [ -z "${IMAGE}" ] || [ -z "${TAG}" ]; then + echo "Error: REGISTRY, IMAGE, and TAG must be set in env_vars" + exit 1 +fi + +# Construct the full image name +FULL_IMAGE="${REGISTRY}${IMAGE}:${TAG}" + +# Generate kustomization.yaml with one inline strategic merge patch (all fields) +cat > "${KUSTOMIZATION_FILE}" < /tmp/download_geo3k.py << 'EOF' +""" +Preprocess the Geometry3k dataset to parquet format. +Adapted from https://github.com/volcengine/verl/blob/main/examples/data_preprocess/geo3k.py +""" +import os +import re +import datasets + +DATA_DIR = os.environ.get("DATA_DIR", "/fsx/verl/data/geo3k") +os.makedirs(DATA_DIR, exist_ok=True) + +data_source = "hiyouga/geometry3k" +instruction_following = ( + r"You FIRST think about the reasoning process as an internal monologue and then provide the final answer. " + r"The reasoning process MUST BE enclosed within tags. " + r"The final answer MUST BE put in \\boxed{}." 
+) + +print(f"Loading dataset: {data_source}") +dataset = datasets.load_dataset(data_source) +train_dataset = dataset["train"] +test_dataset = dataset["test"] + +def make_map_fn(split): + def process_fn(example, idx): + problem = example.pop("problem") + prompt = problem + " " + instruction_following + answer = example.pop("answer") + images = example.pop("images") + data = { + "data_source": data_source, + "prompt": [{"role": "user", "content": prompt}], + "images": images, + "ability": "math", + "reward_model": {"style": "rule", "ground_truth": answer}, + "extra_info": { + "split": split, + "index": idx, + "answer": answer, + "question": problem, + }, + } + return data + return process_fn + +print("Processing train split...") +train_dataset = train_dataset.map(function=make_map_fn("train"), with_indices=True, num_proc=8) +print("Processing test split...") +test_dataset = test_dataset.map(function=make_map_fn("test"), with_indices=True, num_proc=8) + +train_path = os.path.join(DATA_DIR, "train.parquet") +test_path = os.path.join(DATA_DIR, "test.parquet") + +print(f"Saving train to {train_path}") +train_dataset.to_parquet(train_path) +print(f"Saving test to {test_path}") +test_dataset.to_parquet(test_path) + +print("Done. Summary:") +print(f"Train samples: {len(train_dataset)}") +print(f"Test samples: {len(test_dataset)}") +EOF + +# Copy script to pod +echo "Copying download script to pod..." +kubectl cp /tmp/download_geo3k.py ${HEAD_POD}:/tmp/download_geo3k.py + +# Execute the script in the pod +echo "Downloading Geometry3k data..." +kubectl exec ${HEAD_POD} -- bash -c "export DATA_DIR=${DATA_DIR}" +kubectl exec ${HEAD_POD} -- python3 /tmp/download_geo3k.py + +# Verify the files exist +echo "Verifying downloaded files..." +kubectl exec ${HEAD_POD} -- ls -lh ${DATA_DIR}/ + +echo "Geometry3k data download complete!" 
+echo "Data location: ${DATA_DIR}" +echo " - train.parquet" +echo " - test.parquet" + diff --git a/3.test_cases/pytorch/verl/rlvr/setup/prepare-model-path.sh b/3.test_cases/pytorch/verl/rlvr/setup/prepare-model-path.sh new file mode 100755 index 000000000..1b10ad245 --- /dev/null +++ b/3.test_cases/pytorch/verl/rlvr/setup/prepare-model-path.sh @@ -0,0 +1,32 @@ +#!/bin/bash +set -euo pipefail + +# Prepare a local model path for the Qwen3-VL-235B-A22B-Instruct checkpoint. +# This creates the target directory under ${RAY_DATA_HOME}/models and prints +# the HF_MODEL_PATH you can use in jobs. + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +ENV_VARS_FILE="${SCRIPT_DIR}/env_vars" + +if [ ! -f "${ENV_VARS_FILE}" ]; then + echo "Missing ${ENV_VARS_FILE}. Copy env_vars.example and set your values." + exit 1 +fi + +source "${ENV_VARS_FILE}" + +RAY_DATA_HOME="${RAY_DATA_HOME:-/fsx/verl}" +MODEL_LOCAL_NAME="${MODEL_LOCAL_NAME:-Qwen3-VL-235B-A22B-Instruct}" +TARGET_PATH="${RAY_DATA_HOME}/models/${MODEL_LOCAL_NAME}" + +mkdir -p "${TARGET_PATH}" + +cat > "${TARGET_PATH}/README.txt" <<'NOTE' +Place the local model files for Qwen3-VL-235B-A22B-Instruct here. +If you are using Hugging Face Hub, set MODEL_PATH (or HF_MODEL_PATH) to a valid +Hub repo ID instead, e.g. "Qwen/Qwen2-VL-7B-Instruct", and skip local files. 
+NOTE + +echo "Prepared model directory: ${TARGET_PATH}" +echo "Set HF_MODEL_PATH (or MODEL_PATH) to: ${TARGET_PATH}" + diff --git a/3.test_cases/pytorch/verl/rlvr/setup/raycluster.yaml b/3.test_cases/pytorch/verl/rlvr/setup/raycluster.yaml index 73b1ef555..0280f3bb4 100644 --- a/3.test_cases/pytorch/verl/rlvr/setup/raycluster.yaml +++ b/3.test_cases/pytorch/verl/rlvr/setup/raycluster.yaml @@ -1,4 +1,4 @@ -apiVersion: ray.io/v1alpha1 +apiVersion: ray.io/v1 kind: RayCluster metadata: name: rayml-efa @@ -19,7 +19,7 @@ spec: template: spec: nodeSelector: - node.kubernetes.io/instance-type: ${INSTANCE_TYPE} + node.kubernetes.io/instance-type: PLACEHOLDER_INSTANCE_TYPE sagemaker.amazonaws.com/node-health-status: Schedulable securityContext: runAsUser: 0 @@ -27,7 +27,7 @@ spec: fsGroup: 0 containers: - name: ray-head - image: ${REGISTRY}${IMAGE}:${TAG} ## IMAGE: Here you may choose which image your head pod will run + image: PLACEHOLDER_IMAGE ## IMAGE: Here you may choose which image your head pod will run env: ## ENV: Here is where you can send stuff to the head pod ## PROMETHEUS AND GRAFANA - AWS MANAGED SERVICES - name: RAY_GRAFANA_IFRAME_HOST @@ -54,7 +54,7 @@ spec: - name: TORCH_NCCL_ASYNC_ERROR_HANDLING value: "1" - name: HF_TOKEN - value: ${HF_TOKEN} + value: PLACEHOLDER_HF_TOKEN lifecycle: preStop: exec: @@ -96,19 +96,19 @@ spec: # type: DirectoryOrCreate workerGroupSpecs: # the pod replicas in this group typed worker - - replicas: $NUM_NODES ## REPLICAS: How many worker pods you want + - replicas: PLACEHOLDER_NUM_NODES ## REPLICAS: How many worker pods you want minReplicas: 1 maxReplicas: 10 # logical group name, for this called small-group, also can be functional groupName: gpu-group rayStartParams: - num-gpus: "$NUM_GPU_PER_NODE" + num-gpus: "PLACEHOLDER_NUM_GPU_PER_NODE" metrics-export-port: '8080' # Explicitly set metrics port for workers #pod template template: spec: nodeSelector: - node.kubernetes.io/instance-type: ${INSTANCE_TYPE} + 
node.kubernetes.io/instance-type: PLACEHOLDER_INSTANCE_TYPE sagemaker.amazonaws.com/node-health-status: Schedulable securityContext: runAsUser: 0 @@ -116,7 +116,7 @@ spec: fsGroup: 0 containers: - name: ray-worker - image: ${REGISTRY}${IMAGE}:${TAG} ## IMAGE: Here you may choose which image your head node will run + image: PLACEHOLDER_IMAGE ## IMAGE: Here you may choose which image your head node will run env: - name: FI_PROVIDER value: "efa" @@ -135,7 +135,7 @@ spec: - name: TORCH_NCCL_ASYNC_ERROR_HANDLING value: "1" - name: HF_TOKEN - value: ${HF_TOKEN} + value: PLACEHOLDER_HF_TOKEN lifecycle: preStop: exec: @@ -144,13 +144,13 @@ spec: limits: ## LIMITS: Set resource limits for your worker pods cpu: 16 memory: 200Gi - nvidia.com/gpu: $NUM_GPU_PER_NODE - vpc.amazonaws.com/efa: $NUM_EFA_PER_NODE + nvidia.com/gpu: PLACEHOLDER_NUM_GPU_PER_NODE + vpc.amazonaws.com/efa: PLACEHOLDER_NUM_EFA_PER_NODE requests: ## REQUESTS: Set resource requests for your worker pods cpu: 16 memory: 200Gi - nvidia.com/gpu: $NUM_GPU_PER_NODE - vpc.amazonaws.com/efa: $NUM_EFA_PER_NODE + nvidia.com/gpu: PLACEHOLDER_NUM_GPU_PER_NODE + vpc.amazonaws.com/efa: PLACEHOLDER_NUM_EFA_PER_NODE ports: - containerPort: 8080 name: metrics diff --git a/3.test_cases/pytorch/verl/rlvr/setup/submit-qwen-job.sh b/3.test_cases/pytorch/verl/rlvr/setup/submit-qwen-job.sh new file mode 100755 index 000000000..2062af214 --- /dev/null +++ b/3.test_cases/pytorch/verl/rlvr/setup/submit-qwen-job.sh @@ -0,0 +1,168 @@ +#!/bin/bash +set -euo pipefail + +# Submit a one-shot Job inside the cluster to run the Qwen3-235B Megatron recipe +# Requirements: +# - Ray head service reachable at rayml-efa-head-svc.default.svc.cluster.local:10001 +# - fsx-claim PVC available for /fsx +# - HF_TOKEN, REGISTRY/IMAGE/TAG set in env_vars + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +ENV_VARS_FILE="${SCRIPT_DIR}/env_vars" + +if [ ! -f "${ENV_VARS_FILE}" ]; then + echo "Missing ${ENV_VARS_FILE}. 
Copy env_vars.example and set your values." + exit 1 +fi + +source "${ENV_VARS_FILE}" + +if [ -z "${HF_TOKEN:-}" ]; then + echo "HF_TOKEN must be set in ${ENV_VARS_FILE}." + exit 1 +fi + +IMAGE="${REGISTRY}${IMAGE}:${TAG}" +JOB_NAME="qwen3-235b-submit" +RAY_ADDRESS_DEFAULT="ray://rayml-efa-head-svc.default.svc.cluster.local:10001" +WORKING_DIR="/workspace/verl" +TRAIN_FILE_DEFAULT="/fsx/verl/data/geo3k/train.parquet" +TEST_FILE_DEFAULT="/fsx/verl/data/geo3k/test.parquet" +TRAIN_FILE="${TRAIN_FILE:-$TRAIN_FILE_DEFAULT}" +TEST_FILE="${TEST_FILE:-$TEST_FILE_DEFAULT}" +MODEL_PATH=/fsx/verl/models/Qwen3-VL-235B-A22B-Instruct #"${MODEL_PATH:-Qwen/Qwen3-VL-235B-A22B-Instruct}" +ENGINE="${ENGINE:-vllm}" +GEN_TP="${GEN_TP:-16}" +CP="${CP:-1}" +TP="${TP:-1}" +PP="${PP:-4}" +EP="${EP:-8}" +ETP="${ETP:-1}" + +cat <