# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import time

import torch
import torchrl
import torchrl.envs
import torchrl.modules.mcts
from tensordict import TensorDict
from torchrl.data import Composite, Unbounded
from torchrl.envs import Transform

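# Script configuration: key search-tree nodes on FEN hashes (rather than PGN)
# and use the environment's legal-move mask.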
pgn_or_fen = "fen"
mask_actions = True

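# Stateful chess environment exposing FEN strings, SAN moves, position hashes,
# and a legal-action mask.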
env = torchrl.envs.ChessEnv(
    include_pgn=False,
    include_fen=True,
    include_hash=True,
    include_hash_inv=True,
    include_san=True,
    stateful=True,
    mask_actions=mask_actions,
)


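# Transform that presents the single-player ChessEnv as a two-agent, zero-sum,
# turn-based game: each agent gets its own "turn" flag and its own reward.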
class TurnBasedChess(Transform):
    def transform_observation_spec(self, obsspec):
        obsspec["agent0", "turn"] = Unbounded(dtype=torch.bool, shape=())
        obsspec["agent1", "turn"] = Unbounded(dtype=torch.bool, shape=())
        return obsspec

    def transform_reward_spec(self, reward_spec):
        reward = reward_spec["reward"].clone()
        del reward_spec["reward"]
        return Composite(
            agent0=Composite(reward=reward),
            agent1=Composite(reward=reward),
        )

    def _reset(self, _td, td):
        td["agent0", "turn"] = td["turn"]
        td["agent1", "turn"] = ~td["turn"]
        return td

    def _step(self, td, td_next):
        td_next["agent0", "turn"] = td_next["turn"]
        td_next["agent1", "turn"] = ~td_next["turn"]

        reward = td_next["reward"]
        turn = td["turn"]

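        # Remap the scalar environment reward into a zero-sum pair: draws count
        # as 0, and a decisive result is credited to agent0 or agent1 depending
        # on whose move ended the game, so the two rewards always sum to zero.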
        if reward == 0.5:
            reward = 0
        elif reward == 1:
            if not turn:
                reward = -reward

        td_next["agent0", "reward"] = reward
        td_next["agent1", "reward"] = -reward
        del td_next["reward"]

        return td_next


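# Wrap the base environment and run a short rollout as a quick sanity check.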
env = env.append_transform(TurnBasedChess())
env.rollout(3)

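# Forest in which the MCTS run stores nodes and their visit statistics; the key
# lists below tell it which tensordict entries to track.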
forest = torchrl.data.MCTSForest()
forest.reward_keys = env.reward_keys
forest.done_keys = env.done_keys
forest.action_keys = env.action_keys

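# Observation entries that identify a node in the forest. When action masking
# is enabled, the legal-move mask is stored alongside the position hash and the
# per-agent turn flags.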
if mask_actions:
    forest.observation_keys = [
        f"{pgn_or_fen}_hash",
        "turn",
        "action_mask",
        ("agent0", "turn"),
        ("agent1", "turn"),
    ]
else:
    forest.observation_keys = [
        f"{pgn_or_fen}_hash",
        "turn",
        ("agent0", "turn"),
        ("agent1", "turn"),
    ]


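# Debugging helper: render a tree node as [last SAN move, position, win stats,
# visit count] for tree.to_string().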
def tree_format_fn(tree):
    td = tree.rollout[-1]["next"]
    return [
        td["san"],
        td[pgn_or_fen].split("\n")[-1],
        tree.wins,
        tree.visits,
    ]


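# Run MCTS from the given FEN and return the SAN move with the highest average
# value for the player to move.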
def get_best_move(fen, mcts_steps, rollout_steps):
    root = env.reset(TensorDict({"fen": fen}))
    agent_keys = ["agent0", "agent1"]
    mcts = torchrl.modules.mcts.MCTS(mcts_steps, rollout_steps, agent_keys=agent_keys)
    tree = mcts(forest, root, env)
    moves = []

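    # Each child of the root corresponds to one candidate move; score it by the
    # active agent's average reward over the visits to that child.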
    for subtree in tree.subtree:
        td = subtree.rollout[0]
        san = td["next", "san"]
        active_agent = agent_keys[
            torch.stack([td[agent]["turn"] for agent in agent_keys]).nonzero().item()
        ]
        reward_sum = subtree.wins[active_agent, "reward"]
        visits = subtree.visits
        value_avg = (reward_sum / visits).item()
        moves.append((value_avg, san))

    moves = sorted(moves, key=lambda x: -x[0])

    # print(tree.to_string(tree_format_fn))

    print("------------------")
    for value_avg, san in moves:
        print(f"    {value_avg:0.02f} {san}")
    print("------------------")

    return moves[0][1]


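# Solve a few tactics puzzles (mate in one and mate in two) with different
# random seeds and time each run.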
for idx in range(3):
    print("==========")
    print(idx)
    print("==========")
    torch.manual_seed(idx)

    start_time = time.time()

    # White has M1, best move Rd8#. Any other move loses to M2 or M1.
    fen0 = "7k/6pp/7p/7K/8/8/6q1/3R4 w - - 0 1"
    assert get_best_move(fen0, 40, 10) == "Rd8#"

    # Black has M1, best move Qg6#. Other moves give rough equality or worse.
    fen1 = "6qk/2R4p/7K/8/8/8/8/4R3 b - - 1 1"
    assert get_best_move(fen1, 40, 10) == "Qg6#"

    # White has M2, best move Rxg8+. Any other move loses.
    fen2 = "2R3qk/5p1p/7K/8/8/8/5r2/2R5 w - - 0 1"
    assert get_best_move(fen2, 600, 10) == "Rxg8+"

    # Black has M2, best move Rxg1+. Any other move loses.
    fen3 = "2r5/5R2/8/8/8/7k/5P1P/2r3QK b - - 0 1"
    assert get_best_move(fen3, 600, 10) == "Rxg1+"

    end_time = time.time()
    total_time = end_time - start_time

    print(f"Took {total_time} s")