diff --git a/docs/sphinx_doc/assets/agentscope_frozenlake_reward_bench.png b/docs/sphinx_doc/assets/agentscope_frozenlake_reward_bench.png new file mode 100644 index 0000000000..6d95ff172c Binary files /dev/null and b/docs/sphinx_doc/assets/agentscope_frozenlake_reward_bench.png differ diff --git a/docs/sphinx_doc/assets/agentscope_frozenlake_reward_train.png b/docs/sphinx_doc/assets/agentscope_frozenlake_reward_train.png new file mode 100644 index 0000000000..e610759641 Binary files /dev/null and b/docs/sphinx_doc/assets/agentscope_frozenlake_reward_train.png differ diff --git a/docs/sphinx_doc/source/tutorial/align_with_verl.md b/docs/sphinx_doc/source/tutorial/align_with_verl.md index 0fad0d2884..f3714e2154 100644 --- a/docs/sphinx_doc/source/tutorial/align_with_verl.md +++ b/docs/sphinx_doc/source/tutorial/align_with_verl.md @@ -1,4 +1,4 @@ -# How to align configuration with veRL +# Align configuration with veRL This guide provides guidance for users familiar with [veRL](https://github.com/volcengine/verl) to align the parameters and metrics in Trinity-RFT with the ones in veRL. diff --git a/docs/sphinx_doc/source_zh/tutorial/align_with_verl.md b/docs/sphinx_doc/source_zh/tutorial/align_with_verl.md index 4825ad69d8..a884dbcf82 100644 --- a/docs/sphinx_doc/source_zh/tutorial/align_with_verl.md +++ b/docs/sphinx_doc/source_zh/tutorial/align_with_verl.md @@ -1,4 +1,4 @@ -# 如何和 veRL 对齐配置 +# 与 veRL 对齐训练配置 本指南为熟悉 [veRL](https://github.com/volcengine/verl) 的用户提供了将 Trinity-RFT 与 veRL 的参数和指标对齐的方法。 diff --git a/examples/agentscope_frozenlake/README.md b/examples/agentscope_frozenlake/README.md new file mode 100644 index 0000000000..de3b2b5626 --- /dev/null +++ b/examples/agentscope_frozenlake/README.md @@ -0,0 +1,22 @@ +# Frozen Lake Agent + +This example shows the implementation of a Frozen Lake agent using the Agentscope framework. The agent is designed to navigate a frozen lake environment by interpreting observations and selecting appropriate actions. + +The data preparation and environment setup are the same as those in the [GRPO Frozen Lake example](../grpo_frozen_lake/README.md). Please follow the instructions there to set up the environment and prepare the dataset. + + +## Results + +The configuration file for this example is located at [`frozenlake_agent.yaml`](./frozenlake_agent.yaml). We use Qwen2.5-3B-Instruct as the base LLM for the agent. 
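+
+Once the dataset below has been generated, training can be launched with Trinity-RFT's standard entry point. The following is a minimal sketch, assuming the `trinity` CLI is available and that the environment variables referenced in [`frozenlake_agent.yaml`](./frozenlake_agent.yaml) (`TRINITY_MODEL_PATH`, `TRINITY_TASKSET_PATH`, `TRINITY_CHECKPOINT_ROOT_DIR`) point to your model, the generated taskset, and the checkpoint directory:
+
+```bash
+# Assumed paths -- adjust to your environment.
+export TRINITY_MODEL_PATH=Qwen/Qwen2.5-3B-Instruct
+export TRINITY_TASKSET_PATH=/path/to/frozen_lake/data
+export TRINITY_CHECKPOINT_ROOT_DIR=./checkpoints
+trinity run --config examples/agentscope_frozenlake/frozenlake_agent.yaml
+```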
+ + The training and evaluation dataset is generated using the same process as described in the [GRPO Frozen Lake example](../grpo_frozen_lake/README.md) with the following command: + + ```bash + cd examples/grpo_frozen_lake + python get_frozen_lake_data.py --test_size 50 --map_max_size 10 + ``` + + The training result is shown below, demonstrating the reward during training and evaluation phases: + +  +  diff --git a/examples/agentscope_frozenlake/__init__.py b/examples/agentscope_frozenlake/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/examples/agentscope_frozenlake/agent.py b/examples/agentscope_frozenlake/agent.py new file mode 100644 index 0000000000..f4788e67f9 --- /dev/null +++ b/examples/agentscope_frozenlake/agent.py @@ -0,0 +1,74 @@ +import re + +from agentscope.agent import ReActAgent +from agentscope.formatter import OpenAIChatFormatter +from agentscope.message import Msg +from agentscope.model import OpenAIChatModel + +from examples.agentscope_frozenlake.utils import SYSTEM_PROMPT, FrozenLakeAction + +INVALID_ACTION = "still" +VALID_ACTIONS = { + "left": 1, + "down": 2, + "right": 3, + "up": 4, +} + + +class FrozenLakeAgent: + def __init__(self, model: OpenAIChatModel, max_steps: int = 20): + self.model = model + self.agent = ReActAgent( + name="frozenlake_agent", + sys_prompt=SYSTEM_PROMPT, + model=model, + formatter=OpenAIChatFormatter(), + max_iters=2, + ) + self.response_structure = FrozenLakeAction + self.current_step = 0 + self.last_action = None + self.last_observation = None + self.max_steps = max_steps + + def get_prompt(self, observation: str) -> str: + prompt = ( + f"Current Observation ({self.current_step}): \n" + + observation + + "\n" + + "You have not achieved the goal; P has not reached G yet. Please give the next action." + ) + if self.current_step > 0 and self.last_action is not None: + if self.last_observation == observation: + prompt += "\nYour last response is invalid. Your position didn't change at all. You may need to recheck your thinking process, the action you output, and the format of your response. Remember, you should only output the NEXT ACTION at each interaction in the ``` ```. For example, if you want to move up, you should output ```Up```." + + if self.max_steps is not None and self.max_steps - self.current_step > 0: + prompt += ( + f"\nThe maximum number of steps remaining is {self.max_steps - self.current_step}."
+ ) + + return prompt + + def get_action(self, msg: Msg) -> str: + response: str = msg.content if isinstance(msg.content, str) else msg.content[0].get("text") + action = INVALID_ACTION + + matches = re.findall(r"```(.*?)```", response, re.DOTALL) + + if matches: + last_match_content = matches[-1].strip() + action = last_match_content.lower() + if action not in VALID_ACTIONS: + action = INVALID_ACTION + + return action + + async def step(self, current_observation: str) -> str: + prompt = self.get_prompt(current_observation) + response = await self.agent.reply(Msg("user", prompt, role="user")) + action = self.get_action(response) + self.last_observation = current_observation + self.last_action = action + self.current_step += 1 + return action diff --git a/examples/agentscope_frozenlake/env.py b/examples/agentscope_frozenlake/env.py new file mode 100644 index 0000000000..3de4a26857 --- /dev/null +++ b/examples/agentscope_frozenlake/env.py @@ -0,0 +1,210 @@ +import copy +from typing import Dict, Optional, Tuple + +import numpy as np + +from examples.agentscope_frozenlake.utils import generate_random_map, get_goal_position +from trinity.utils.log import get_logger + +try: + from gymnasium.envs.toy_text.frozen_lake import FrozenLakeEnv as GymFrozenLakeEnv +except ImportError: + GymFrozenLakeEnv = object + + +class FrozenLakeEnv(GymFrozenLakeEnv): + # Map gym state in integer + MAP_LOOKUP = { + b"P": 0, + b"F": 1, + b"H": 2, + b"G": 3, + } + + # Define rules to transform to rendered text observation of the environment + GRID_LOOKUP = { + 0: " P \t", # player + 1: " _ \t", # frozen + 2: " O \t", # hole + 3: " G \t", # goal + 4: " X \t", # player fall into hole + 5: " √ \t", # player on goal + } + + ACTION_LOOKUP = { + "still": 0, + "left": 1, + "down": 2, + "right": 3, + "up": 4, + } + + INVALID_ACTION = 0 + PENALTY_FOR_INVALID = -1 + + def __init__( + self, + max_steps: int = 8, + desc: Optional[str] = None, + is_slippery: bool = False, + size: int = 8, + p: float = 0.8, + seed: int = 42, + ): + self.logger = get_logger() + self.max_steps = max_steps or 8 + self.desc = desc + self.is_slippery = is_slippery + self.size = size + self.p = p + self.seed = seed + try: + import gymnasium as gym + from gymnasium.envs.toy_text.frozen_lake import ( + FrozenLakeEnv as GymFrozenLakeEnv, + ) + except ImportError as e: + error_message = ( + f"Gymnasium is not installed. Please install gymnasium first before " + f"running the frozen_lake workflow. Error: {str(e)}" + ) + self.logger.error(error_message) + raise ImportError(error_message) + + if self.desc is None: + random_map, goal_position = generate_random_map( + size=self.size, p=self.p, seed=self.seed, max_steps=self.max_steps + ) + else: + random_map = np.asarray(copy.deepcopy(self.desc), dtype="c") + goal_position = get_goal_position(random_map) + + self.goal_position = goal_position + + GymFrozenLakeEnv.__init__(self, desc=random_map[:], is_slippery=self.is_slippery) + self.action_space = gym.spaces.Discrete(4, start=1) + + self.map_kwargs = { + "size": size, + "p": p, + } + self.env_kwargs = { + "is_slippery": is_slippery, + "desc": copy.deepcopy(desc), + "seed": seed, + } + + self.action_map = { + 1: 0, # left + 2: 1, # down + 3: 2, # right + 4: 3, # up + } + + def _get_player_position(self) -> Tuple[int, int]: + return (self.s // self.ncol, self.s % self.ncol) # (row, col) + + def step(self, action: str) -> Tuple[str, float, bool, Dict]: + """Execute a step in the environment. 
+ + Maps custom action to gymnasium FrozenLakeEnv action and takes the step. + Checks if the action is effective (whether player moves in the env). + + Args: + action: The action to take. + + Returns: + Tuple of (observation, reward, done, info). + """ + if self.success(): + return self.render(), 1, True, {"action_is_effective": False} + + action_id: int = self.ACTION_LOOKUP.get(action.lower(), 0) + + if not action_id: + action_id = self.INVALID_ACTION + + if action_id == self.INVALID_ACTION or action_id not in self.action_map: + return self.render(), 0, False, {"action_is_effective": False} + + prev_player_position = int(self.s) + + player_pos, reward, done, _, _ = GymFrozenLakeEnv.step(self, self.action_map[action_id]) + + obs = self.render() + return obs, reward, done, {"action_is_effective": prev_player_position != int(player_pos)} + + def render(self, mode="tiny_rgb_array"): + """Render the environment. + + Args: + mode: Rendering mode. Options: "tiny_rgb_array", "list", "state", "rgb_array", "ansi". + + Returns: + Rendered observation based on the mode. + """ + assert mode in ["tiny_rgb_array", "list", "state", "rgb_array", "ansi"] + if mode in ["rgb_array", "ansi"]: + prev_render_mode = self.render_mode + self.render_mode = mode + obs = GymFrozenLakeEnv.render(self) + self.render_mode = prev_render_mode + return obs + room_state = copy.deepcopy(self.desc) + + # replace the position of start 'S' with 'F' + position_S = np.where(room_state == b"S") + room_state[position_S] = b"F" + + # replace the position of the player with 'P' + position_P = self._get_player_position() + room_state[position_P] = b"P" + + if mode == "state": + # transform 'S', 'F', 'H', 'G' to numpy integer array + room_state = np.vectorize(lambda x: self.MAP_LOOKUP[x])(room_state) + # add player in hole or player on goal + if self.desc[position_P] == b"H": + room_state[position_P] = 4 + elif self.desc[position_P] == b"G": + room_state[position_P] = 5 + return room_state + + room_state = self.render(mode="state").tolist() + + if mode == "list": + + def lookup(cell): + return self.GRID_LOOKUP.get(cell, "?").strip("\t").strip() + + return [" ".join(lookup(cell) for cell in row) for row in room_state] + + if mode == "tiny_rgb_array": + + def lookup(cell): + return self.GRID_LOOKUP.get(cell, "?") + + result = "\n".join("".join(lookup(cell) for cell in row) for row in room_state) + return result + + def reset(self, task: Optional[Dict] = None): + task = task or {} + self.__init__( # type: ignore [misc] + size=task.get("size", self.map_kwargs["size"]), + p=task.get("p", self.map_kwargs["p"]), + seed=task.get("seed", self.env_kwargs["seed"]), + is_slippery=task.get("is_slippery", self.env_kwargs["is_slippery"]), + ) + GymFrozenLakeEnv.reset(self, seed=self.seed) + return self.render(mode="tiny_rgb_array"), {} + + def finished(self) -> bool: + player_pos = self._get_player_position() + return self.desc[player_pos] in b"GH" # type: ignore [index,operator] + + def success(self): + """ + Check if the agent has reached the goal (G). 
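+        Returns: True if the player's current tile is the goal (G), False otherwise.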
+ """ + player_pos = self._get_player_position() + return self.desc[player_pos] in b"G" diff --git a/examples/agentscope_frozenlake/frozenlake_agent.yaml b/examples/agentscope_frozenlake/frozenlake_agent.yaml new file mode 100644 index 0000000000..5144d9ecca --- /dev/null +++ b/examples/agentscope_frozenlake/frozenlake_agent.yaml @@ -0,0 +1,78 @@ +project: "FrozenLake" +name: "Qwen2.5-3B-Instruct-agent" +checkpoint_root_dir: ${oc.env:TRINITY_CHECKPOINT_ROOT_DIR,./checkpoints} +algorithm: + algorithm_type: multi_step_grpo + repeat_times: 16 + kl_loss_fn: "low_var_kl" + kl_loss_fn_args: + kl_coef: 0 + advantage_fn_args: + epsilon: 1e-6 + std_threshold: 0.0001 + enable_step_norm: true + optimizer: + lr: 1e-6 +model: + model_path: ${oc.env:TRINITY_MODEL_PATH,Qwen/Qwen2.5-3B-Instruct} + max_response_tokens: 2048 + max_model_len: 25600 + temperature: 1.0 +cluster: + node_num: 1 + gpu_per_node: 8 +buffer: + total_epochs: 1 + batch_size: 32 + train_batch_size: 1024 + explorer_input: + taskset: + name: frozenlake + storage_type: file + path: ${oc.env:TRINITY_TASKSET_PATH} + split: train + workflow_args: + env_max_steps: 8 + agent_max_steps: 10 + is_slippery: false + default_workflow_type: 'examples.agentscope_frozenlake.workflow.FrozenLakeWorkflow' + trainer_input: + experience_buffer: + name: frozenlake_experience_buffer + storage_type: queue + max_read_timeout: 7200 + replay_buffer: + enable: true + priority_fn: linear_decay + priority_fn_args: + decay: 0.1 +explorer: + eval_on_startup: true + eval_interval: 20 + runner_per_model: 8 + rollout_model: + engine_num: 6 + tensor_parallel_size: 1 + enable_chunked_prefill: true + enforce_eager: false + enable_openai_api: true + enable_log_requests: true + enable_history: true + enable_auto_tool_choice: true + tool_call_parser: hermes + # reasoning_parser: deepseek_r1 # if you use Qwen3 series, uncomment this line + enable_thinking: true + dtype: bfloat16 + seed: 42 + gpu_memory_utilization: 0.85 +trainer: + save_interval: 100 + use_dynamic_bsz: true + grad_clip: 1.0 + ulysses_sequence_parallel_size: 2 + +synchronizer: + sync_method: nccl + sync_style: dynamic_by_explorer + sync_interval: 1 + sync_timeout: 1200 diff --git a/examples/agentscope_frozenlake/utils.py b/examples/agentscope_frozenlake/utils.py new file mode 100644 index 0000000000..9cac5de81c --- /dev/null +++ b/examples/agentscope_frozenlake/utils.py @@ -0,0 +1,169 @@ +""" +Utils for the FrozenLake environment. +Modified from https://github.com/rllm-org/rllm/blob/main/rllm/environments/frozenlake/frozenlake.py +""" + +from typing import Literal, Optional, Tuple + +import numpy as np +from pydantic import BaseModel, Field + +# Map gym state in integer +MAP_LOOKUP = { + b"P": 0, + b"F": 1, + b"H": 2, + b"G": 3, +} + +# Define rules to transform to rendered text observation of the environment +GRID_LOOKUP = { + 0: " P \t", # player + 1: " _ \t", # frozen + 2: " O \t", # hole + 3: " G \t", # goal + 4: " X \t", # player fall into hole + 5: " √ \t", # player on goal +} + +ACTION_LOOKUP = { + 0: "None", + 1: "Left", + 2: "Down", + 3: "Right", + 4: "Up", +} + +# Prompting format inspired by the RAGEN project: https://github.com/RAGEN-AI/RAGEN +SYSTEM_PROMPT = """You are Qwen, created by Alibaba Cloud. You are a helpful assistant. You are walking on a frozen lake. + +FrozenLake Quick Guide +Goal: Reach the goal (G). Player (P) and Goal (G) must overlap. + +Symbols: +_ Frozen | O Hole | G Goal | P Player + +Rules: +1. Avoid falling into holes (O). +2. 
Frozen tiles are slippery, so you may move perpendicular to your intended direction. + +Valid Actions (separated by | ): +Up | Down | Left | Right + +Rewards: +Fall into hole: 0 +Reach goal: +1.0 + +You will be provided with the current observation; please decide on the next Action. +You should show your thought process and then put the final action in ``` ```. +You should only output the NEXT ACTION at each interaction in the ``` ```. For example, if you want to move up, you should output ```Up```. +You should plan ahead and aim to reach the goal in the minimum number of steps. +You should be aware that frozen tiles can be slippery, but the chance is small and you should not overthink it. + +Please show your thinking process and put the final action in ``` ```. In every turn, the final action MUST be one of Up, Down, Left, Right. +""" + + +class FrozenLakeAction(BaseModel): + action: Literal["Up", "Down", "Left", "Right"] = Field( + description="The action to take in the FrozenLake environment, must be one of Up, Down, Left, Right", + ) + + +def is_valid(board: list[list[str]], max_size: int, max_steps: int) -> bool: + """DFS to check that there is a valid path. + + Args: + board: The board representation as a list of lists. + max_size: Maximum size of the board. + max_steps: Maximum number of steps allowed. + + Returns: + True if there's a valid path from start to goal within max_steps, False otherwise. + """ + frontier, discovered = [], set() + # find the start point + start_r, start_c = np.where(np.array(board) == "S") + frontier.append((start_r[0], start_c[0], 0)) # row, col, steps + # dfs to check if there is a path from start to goal + while frontier: + r, c, steps = frontier.pop() + if steps > max_steps: + continue + + if (r, c) not in discovered: + discovered.add((r, c)) + directions = [(1, 0), (0, 1), (-1, 0), (0, -1)] + for x, y in directions: + r_new = r + x + c_new = c + y + if r_new < 0 or r_new >= max_size or c_new < 0 or c_new >= max_size: + continue + if board[r_new][c_new] == "G": + return True + if board[r_new][c_new] != "H": + frontier.append((r_new, c_new, steps + 1)) + return False + + +def generate_random_map( + size: int = 8, p: float = 0.8, seed: int = 0, max_steps: int = 5 +) -> Tuple[list[str], Tuple[int, int]]: + """Generates a random valid map (one that has a path from start to goal). + + Args: + size: Size of each side of the grid. + p: Probability that a tile is frozen. + seed: Seed to ensure the generation of reproducible maps. + max_steps: Maximum number of steps allowed. + + Returns: + A tuple containing a random valid map and the goal position (row, col). + """ + valid = False + board: list[list[str]] = [] # initialize to make pyright happy + + try: + from gymnasium.utils import seeding + + np_random, _ = seeding.np_random(seed) + except ImportError: + raise ImportError( + "Gymnasium is not installed. Please install gymnasium first before running the frozen_lake workflow."
+ ) + + # generate random start and end points + while not valid: + p = min(1, p) + board = np_random.choice(["F", "H"], (size, size), p=[p, 1 - p]).tolist() + + while True: + start_r = int(np_random.integers(0, size)) + start_c = int(np_random.integers(0, size)) + goal_r = int(np_random.integers(0, size)) + goal_c = int(np_random.integers(0, size)) + + # Ensure start and goal are different positions + if (start_r, start_c) != (goal_r, goal_c): + break + + board[start_r][start_c] = "S" + board[goal_r][goal_c] = "G" + + valid = is_valid(board, size, max_steps) + return ["".join(x) for x in board], (goal_r, goal_c) + + +def get_goal_position(random_map: np.ndarray) -> Optional[Tuple[int, int]]: + """Get the goal position from a random map. + + Args: + random_map: The map as a numpy array. + + Returns: + Tuple of (row, col) if goal found, None otherwise. + """ + positions = np.argwhere(random_map == b"G") + if positions.size == 0: + return None # G not found + return tuple(positions[0]) # returns (row, col) diff --git a/examples/agentscope_frozenlake/workflow.py b/examples/agentscope_frozenlake/workflow.py new file mode 100644 index 0000000000..4c1f175642 --- /dev/null +++ b/examples/agentscope_frozenlake/workflow.py @@ -0,0 +1,161 @@ +# -*- coding: utf-8 -*- +""" +This file defines a multi-step workflow for the FrozenLake environment. +Modified from https://github.com/rllm-org/rllm/blob/main/rllm/environments/frozenlake/frozenlake.py +""" + +from __future__ import annotations + +from typing import List, Optional + +from trinity.common.experience import Experience +from trinity.common.models.model import ModelWrapper +from trinity.common.workflows.workflow import Task, Workflow + + +class FrozenLakeWorkflow(Workflow): + """ + FrozenLake environment for multi-step workflows. + + ## Description + The game starts with the player at random location of the frozen lake grid world with the + goal located at another random location for the 4x4 environment. + + ## Action Space + The action shape is `(1,)` in the range `{0, 3}` indicating + which direction to move the player. + NOTE the action space is different from gymnasium.envs.toy_text.frozen_lake.FrozenLakeEnv, start from 1 + use action_map to map from custom action to action defined in FrozenLakeEnv in gymnasium + - 0: Still + - 1: Left + - 2: Down + - 3: Right + - 4: Up + + ## Starting State + The episode starts with the player at random location + + ## Rewards + Reward schedule: + - Reach goal: +1 + - Reach hole: 0 + - Reach frozen: 0 + + ## Arguments + `is_slippery`: if action is left and is_slippery is True, then: + - P(move left)=1/3 + - P(move up)=1/3 + - P(move down)=1/3 + + ## Example + P _ _ _ + _ _ _ O + O _ O _ + O _ _ G + """ + + can_reset: bool = False # GymFrozenLakeEnv can only reset the player position, not the environment configuration. + is_async: bool = True + can_repeat: bool = False + + def __init__( + self, + model: ModelWrapper, + task: Task, + auxiliary_models: Optional[List] = None, + ): + """Initialize the FrozenLake workflow. + + Args: + model: The model wrapper to use for generating actions. + task: The task configuration containing workflow-specific arguments. + auxiliary_models: Optional list of auxiliary models. 
+ """ + super().__init__( + model=model, + task=task, + auxiliary_models=auxiliary_models, + ) + + # Extract workflow-specific arguments + workflow_args = task.workflow_args if hasattr(task, "workflow_args") else {} + self.env_max_steps = workflow_args.get("env_max_steps", 8) + self.agent_max_steps = workflow_args.get("agent_max_steps", 10) + self.desc = workflow_args.get("desc", None) + self.is_slippery = workflow_args.get("is_slippery", False) + self.max_response_tokens = self.rollout_args.get("max_response_tokens", 10240) + + # Extract task-specific arguments + self.raw_task = task.raw_task if hasattr(task, "raw_task") else {} + self.size = self.raw_task.get("size", 1) + self.p = self.raw_task.get("p", 0.8) + self.seed = self.raw_task.get("seed", 42) + + from agentscope.model import OpenAIChatModel + + from examples.agentscope_frozenlake.agent import FrozenLakeAgent + from examples.agentscope_frozenlake.env import FrozenLakeEnv + + self.agentscope_model = OpenAIChatModel( + api_key="EMPTY", + model_name=model.model_path, + generate_kwargs=self.rollout_args, + stream=False, + ) + + self.agentscope_model.client = self.model.get_openai_async_client() + self.agent = FrozenLakeAgent(model=self.agentscope_model, max_steps=self.agent_max_steps) + self.env = FrozenLakeEnv( + max_steps=self.env_max_steps, + desc=self.desc, + is_slippery=self.is_slippery, + size=self.size, + p=self.p, + seed=self.seed, + ) + + @property + def rollout_args(self): + return { + "temperature": self.task.rollout_args.temperature, + "max_tokens": self.task.rollout_args.max_tokens, + } + + async def run_async(self) -> List[Experience]: + self.env.reset(self.task.raw_task) + terminate_reason = None + observation_str = str(self.env.render()) + rewards = [] + step_count = 0 + done = False + for _ in range(self.agent_max_steps): + step_count += 1 + try: + action = await self.agent.step(current_observation=observation_str) + except Exception as e: + self.logger.error(f"Agent failed to produce action due to error: {e}") + terminate_reason = "agent_error" + break + observation, reward, done, _ = self.env.step(action) + observation_str = str(observation) + rewards.append(reward) + if done: + terminate_reason = "success" + break + + if terminate_reason is None: + terminate_reason = "max_steps_reached" + + final_reward = sum(rewards) + exps = self.model.extract_experience_from_history() + for exp in exps: + exp.reward = final_reward + exp.info["terminate_reason"] = terminate_reason + + if len(exps) > 0: + exps[-1].metrics = { + "env_steps": step_count, + "env_done": int(done), + "final_reward": final_reward, + } + return exps diff --git a/examples/grpo_frozen_lake/frozen_lake.yaml b/examples/grpo_frozen_lake/frozen_lake.yaml index 84c5022374..038a924e90 100644 --- a/examples/grpo_frozen_lake/frozen_lake.yaml +++ b/examples/grpo_frozen_lake/frozen_lake.yaml @@ -7,11 +7,11 @@ algorithm: optimizer: lr: 1e-6 policy_loss_fn_args: - loss_agg_mode: "seq-mean-token-sum" clip_range_low: 0.2 clip_range_high: 0.28 kl_loss_fn_args: kl_coef: 0.0 + loss_agg_mode: "seq-mean-token-sum" model: model_path: ${oc.env:TRINITY_MODEL_PATH,Qwen/Qwen2.5-3B-Instruct} enable_prompt_truncation: false diff --git a/pyproject.toml b/pyproject.toml index 098e1e1074..225079c305 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -53,7 +53,7 @@ data = [ "py-data-juicer>=1.4.3" ] agent = [ - "agentscope>=1.0.5" + "agentscope>=1.0.9" ] rm_gallery = [ "rm-gallery>=0.1.5" diff --git a/tests/explorer/workflow_test.py b/tests/explorer/workflow_test.py index 
882786213d..b0df0369a5 100644 --- a/tests/explorer/workflow_test.py +++ b/tests/explorer/workflow_test.py @@ -761,3 +761,6 @@ async def test_workflow_with_openai(self): self.assertEqual(len(exps), 2) exps = runner.model_wrapper.extract_experience_from_history(clear_history=False) self.assertEqual(len(exps), 0) + + def tearDown(self): + ray.shutdown(_exiting_interpreter=True) diff --git a/tests/trainer/trainer_test.py b/tests/trainer/trainer_test.py index c22780d811..d6d736d73d 100644 --- a/tests/trainer/trainer_test.py +++ b/tests/trainer/trainer_test.py @@ -179,7 +179,11 @@ def test_trainer(self): # Trainer: # | 1 | 2 |sync| 3 | 4 | # |---|---|sync|---|---| - self.config.buffer.total_epochs = 1 + self.config.buffer.batch_size = 6 + self.config.buffer.total_steps = 4 + # use 3 GPUs in a 2 x 2 cluster, so the trainer only has 1 GPU + self.config.explorer.rollout_model.engine_num = 3 + self.config.explorer.rollout_model.tensor_parallel_size = 1 self.config.buffer.explorer_input.taskset = get_unittest_dataset_config("countdown") self.config.trainer.save_interval = 4 self.config.synchronizer.sync_interval = 2 diff --git a/trinity/buffer/pipelines/experience_pipeline.py b/trinity/buffer/pipelines/experience_pipeline.py index e978ac9ace..05ef640f94 100644 --- a/trinity/buffer/pipelines/experience_pipeline.py +++ b/trinity/buffer/pipelines/experience_pipeline.py @@ -60,14 +60,17 @@ def _init_input_storage( StorageConfig( storage_type=StorageType.FILE.value, path=pipeline_config.input_save_path, + schema_type="experience", wrap_in_ray=False, ), ) elif is_database_url(pipeline_config.input_save_path): return get_buffer_writer( StorageConfig( + name="pipeline_input", storage_type=StorageType.SQL.value, path=pipeline_config.input_save_path, + schema_type="experience", wrap_in_ray=False, ), ) diff --git a/trinity/buffer/storage/sql.py b/trinity/buffer/storage/sql.py index ea3bf4342c..e3068fd896 100644 --- a/trinity/buffer/storage/sql.py +++ b/trinity/buffer/storage/sql.py @@ -34,6 +34,9 @@ def __init__(self, config: StorageConfig) -> None: self.logger = get_logger(f"sql_{config.name}", in_ray_actor=True) if not config.path: raise ValueError("`path` is required for SQL storage type.") + self.logger.info( + f"Init engine {config.path} with table {config.name} and schema {config.schema_type}" + ) self.engine, self.table_model_cls = init_engine( db_url=config.path, table_name=config.name, diff --git a/trinity/buffer/viewer.py b/trinity/buffer/viewer.py index 32b647ddd3..86d75240e7 100644 --- a/trinity/buffer/viewer.py +++ b/trinity/buffer/viewer.py @@ -49,7 +49,7 @@ def get_color_for_action_mask(action_mask_value: int) -> str: return "#ffcdd2" -def render_experience(exp: Experience, exp_index: int, tokenizer): +def render_experience(exp: Experience, tokenizer): """Render a single experience sequence in Streamlit.""" token_ids = exp.tokens logprobs = exp.logprobs @@ -91,76 +91,57 @@ def html_escape(text): .replace("'", "&#39;") ) - # Build full HTML (with CSS) - html = f""" + # === Use Streamlit Native Components for Prompt and Response === + st.subheader(f"Experience [{exp.eid}]") + + # Prompt section using st.code + st.markdown("**📝 Prompt:**") + st.code(prompt_text, language=None, wrap_lines=True, line_numbers=True) + + # Response section using st.code + st.markdown("**💬 Response:**") + st.code(response_text, language=None, wrap_lines=True, line_numbers=True) + + # Reward and other info + st.markdown("**🏆 Reward and Other Info:**") + reward, info, metrics = st.columns(3) +
reward.metric("**Reward:**", f"{exp.reward or 0.0:.4f}") + metrics.markdown("**Metrics:**") + metrics.json(exp.metrics or {}, expanded=False) + info.markdown("**Info:**") + info.json(exp.info or {}, expanded=False) + + # Response Tokens Detail section using components.html + st.markdown("**🔍 Response Tokens Detail:**") + + # Build HTML only for Response Tokens Detail + html = """
-