From 9b50fd52ce237e9b3e13d3f07eaccf3453bc0b6e Mon Sep 17 00:00:00 2001 From: reinis Date: Thu, 5 Jun 2025 16:49:54 +0200 Subject: [PATCH 01/12] first impl of MARL --- robot_nav/models/CNNTD3/CNNTD3.py | 343 +++++++++++++++-- robot_nav/models/CNNTD3/att.py | 614 ++++++++++++++++++++++++++++++ robot_nav/models/TD3/TD3.py | 2 +- robot_nav/multi_robot_world.yaml | 58 +++ robot_nav/multi_robot_world2.yaml | 27 ++ robot_nav/multi_train.py | 145 +++++++ robot_nav/multi_train2.py | 154 ++++++++ robot_nav/replay_buffer.py | 4 +- robot_nav/sim.py | 129 +++++-- robot_nav/sim2.py | 254 ++++++++++++ robot_nav/train.py | 8 +- 11 files changed, 1658 insertions(+), 80 deletions(-) create mode 100644 robot_nav/models/CNNTD3/att.py create mode 100644 robot_nav/multi_robot_world.yaml create mode 100644 robot_nav/multi_robot_world2.yaml create mode 100644 robot_nav/multi_train.py create mode 100644 robot_nav/multi_train2.py create mode 100644 robot_nav/sim2.py diff --git a/robot_nav/models/CNNTD3/CNNTD3.py b/robot_nav/models/CNNTD3/CNNTD3.py index 0be20da..31b6946 100644 --- a/robot_nav/models/CNNTD3/CNNTD3.py +++ b/robot_nav/models/CNNTD3/CNNTD3.py @@ -10,6 +10,262 @@ from robot_nav.utils import get_max_bound +class Attention(nn.Module): + def __init__(self, embedding_dim): + super(Attention, self).__init__() + self.embedding_dim = embedding_dim + + # CNN for laser scan + self.cnn1 = nn.Conv1d(1, 4, kernel_size=8, stride=4) + self.cnn2 = nn.Conv1d(4, 8, kernel_size=8, stride=4) + self.cnn3 = nn.Conv1d(8, 4, kernel_size=4, stride=2) + + # Embedding for goal and action + self.goal_embed = nn.Linear(3, 10) + self.action_embed = nn.Linear(2, 10) + + # Output of CNN + goal + action embeddings + self.layer_1 = nn.Linear(24, 48) + + # Hard attention MLP with distance + self.hard_mlp = nn.Sequential( + nn.Linear(embedding_dim * 2 + 1, embedding_dim), + nn.ReLU(), + nn.Linear(embedding_dim, embedding_dim) + ) + self.hard_encoding = nn.Linear(embedding_dim, 2) + + # Soft attention projections + self.q = nn.Linear(embedding_dim, embedding_dim, bias=False) + self.k = nn.Linear(embedding_dim, embedding_dim, bias=False) + self.v = nn.Linear(embedding_dim, embedding_dim) + + # Soft attention score network (with distance) + self.attn_score_layer = nn.Sequential( + nn.Linear(embedding_dim + 1, embedding_dim), + nn.ReLU(), + nn.Linear(embedding_dim, 1) + ) + + # Decoder + self.decode_1 = nn.Linear(embedding_dim * 2, 400) + nn.init.kaiming_uniform_(self.decode_1.weight, nonlinearity="leaky_relu") + self.decode_2 = nn.Linear(400, 300) + nn.init.kaiming_uniform_(self.decode_2.weight, nonlinearity="leaky_relu") + self.decode_3 = nn.Linear(300, 2) + self.tanh = nn.Tanh() + + def encode_agent_features(self, laser, goal, act): + laser = laser.unsqueeze(1) + l = F.leaky_relu(self.cnn1(laser)) + l = F.leaky_relu(self.cnn2(l)) + l = F.leaky_relu(self.cnn3(l)) + l = l.flatten(start_dim=1) + + g = F.leaky_relu(self.goal_embed(goal)) + a = F.leaky_relu(self.action_embed(act)) + + features = torch.cat((l, g, a), dim=-1) + return F.leaky_relu(self.layer_1(features)) + + def forward(self, embedding): + if embedding.dim() == 2: + embedding = embedding.unsqueeze(0) + batch_size, n_agents, _ = embedding.shape + + laser = embedding[:, :, :-7].reshape(batch_size * n_agents, -1) + goal = embedding[:, :, -7:-4].reshape(batch_size * n_agents, -1) + act = embedding[:, :, -4:-2].reshape(batch_size * n_agents, -1) + position = embedding[:, :, -2:].reshape(batch_size, n_agents, 2) + + agent_embed = self.encode_agent_features(laser, goal, act) + 
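+        # Layout of each per-agent state vector assumed by the slicing above
+        # (it matches prepare_state at the bottom of this file):
+        #   [ laser ranges | dist, cos, sin | lin_vel, ang_vel | x, y ]
+        # i.e. [:-7] is the scan, [-7:-4] the goal features, [-4:-2] the last
+        # action, and [-2:] the robot's world-frame position.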
agent_embed = agent_embed.view(batch_size, n_agents, self.embedding_dim) + + # Hard attention + h_i = agent_embed.unsqueeze(2) # (B, N, 1, D) + h_j = agent_embed.unsqueeze(1) # (B, 1, N, D) + pos_i = position.unsqueeze(2) # (B, N, 1, 2) + pos_j = position.unsqueeze(1) # (B, 1, N, 2) + + pairwise_dist = torch.norm(pos_i - pos_j, dim=-1, keepdim=True) # (B, N, N, 1) + + mask = ~torch.eye(n_agents, dtype=torch.bool, device=embedding.device) + + h_i = h_i.expand(-1, -1, n_agents, -1) + h_j = h_j.expand(-1, n_agents, -1, -1) + d_ij = pairwise_dist # already correct shape + + hard_pairs = torch.cat([h_i, h_j], dim=-1) # (B, N, N, 2D) + hard_pairs = hard_pairs[:, mask].reshape(batch_size * n_agents, n_agents - 1, self.embedding_dim * 2) + d_ij = d_ij[:, mask].reshape(batch_size * n_agents, n_agents - 1, 1) + + hard_input = torch.cat([hard_pairs, d_ij], dim=-1) # (B*N, N-1, 2D+1) + h_hard = self.hard_mlp(hard_input) + hard_logits = self.hard_encoding(h_hard) + hard_weights = F.gumbel_softmax(hard_logits, tau=0.01, dim=-1)[..., 1].unsqueeze(2) + hard_weights = hard_weights.view(batch_size, n_agents, n_agents - 1) + + # Soft attention + q = self.q(agent_embed) # (B, N, D) + k = self.k(agent_embed) + v = F.relu(self.v(agent_embed)) + + attention_outputs = [] + for i in range(n_agents): + q_i = q[:, i].unsqueeze(1) # (B, 1, D) + k_j = torch.cat([k[:, :i], k[:, i+1:]], dim=1) # (B, N-1, D) + v_j = torch.cat([v[:, :i], v[:, i+1:]], dim=1) + pos_i = position[:, i].unsqueeze(1) # (B, 1, 2) + pos_j = torch.cat([position[:, :i], position[:, i+1:]], dim=1) # (B, N-1, 2) + dist_ij = torch.norm(pos_i - pos_j, dim=-1, keepdim=True) # (B, N-1, 1) + + q_i_ext = q_i.expand(-1, k_j.shape[1], -1) # (B, N-1, D) + qk_comb = q_i_ext * k_j # (B, N-1, D) + qk_with_dist = torch.cat([qk_comb, dist_ij], dim=-1) # (B, N-1, D+1) + + scores = self.attn_score_layer(qk_with_dist).transpose(1, 2) # (B, 1, N-1) + soft_weights = F.softmax(scores, dim=-1) # (B, 1, N-1) + + h_weights = hard_weights[:, i].unsqueeze(1) # (B, 1, N-1) + v_j = v_j.unsqueeze(1) # (B, 1, N-1, D) + combined_weights = soft_weights * h_weights # (B, 1, N-1) + attn_output = (v_j * combined_weights.unsqueeze(-1)).sum(dim=2) # (B, 1, D) + attention_outputs.append(attn_output.squeeze(1)) # (B, D) + + attn_stack = torch.stack(attention_outputs, dim=1).reshape(-1, self.embedding_dim) + self_embed = agent_embed.reshape(-1, self.embedding_dim) + concat_embed = torch.cat([self_embed, attn_stack], dim=-1) + + # Decode + x = F.leaky_relu(self.decode_1(concat_embed)) + x = F.leaky_relu(self.decode_2(x)) + action = self.tanh(self.decode_3(x)) + return action, hard_weights + + +class HardAttention(nn.Module): + def __init__(self, embedding_dim): + super(HardAttention, self).__init__() + self.embedding_dim = embedding_dim + # self.hard_bi_GRU = nn.GRU(args.rnn_hidden_dim * 2, args.rnn_hidden_dim, bidirectional=True) + self.cnn1 = nn.Conv1d(1, 4, kernel_size=8, stride=4) + self.cnn2 = nn.Conv1d(4, 8, kernel_size=8, stride=4) + self.cnn3 = nn.Conv1d(8, 4, kernel_size=4, stride=2) + + self.goal_embed = nn.Linear(3, 10) + self.action_embed = nn.Linear(2, 10) + + self.layer_1 = nn.Linear(24, 48) + + self.hard_encoding = nn.Linear(embedding_dim, 2) + + self.hard_mlp = nn.Sequential( + nn.Linear(embedding_dim * 2, embedding_dim), + nn.ReLU(), + nn.Linear(embedding_dim, embedding_dim) + ) + + # Soft + self.q = nn.Linear(embedding_dim, embedding_dim, bias=False) + self.k = nn.Linear(embedding_dim, embedding_dim, bias=False) + self.v = nn.Linear(embedding_dim, embedding_dim) + + 
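+        # Note: both attention modules in this file gate agent-to-agent
+        # communication with a Gumbel-softmax sample over per-pair logits,
+        # which gives a differentiable, near-binary "attend or ignore" switch
+        # (a sketch of the pattern used in forward()):
+        #   logits = self.hard_encoding(h)                  # (..., 2)
+        #   w = F.gumbel_softmax(logits, tau=0.01)[..., 1]  # ~0/1 weights
+        # The low temperature pushes w close to 0/1 while keeping gradients.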
self.decode_1 = nn.Linear(96, 400)
+        torch.nn.init.kaiming_uniform_(self.decode_1.weight, nonlinearity="leaky_relu")
+        self.decode_2 = nn.Linear(400, 300)
+        torch.nn.init.kaiming_uniform_(self.decode_2.weight, nonlinearity="leaky_relu")
+        self.decode_3 = nn.Linear(300, 2)
+        self.tanh = nn.Tanh()
+
+    def forward(self, embedding):
+        if len(embedding.shape) == 2:
+            embedding = embedding.unsqueeze(0)
+        batch_size, n_agents, _ = embedding.shape
+
+        laser = embedding[:, :, :-5].reshape(batch_size * n_agents, -1)
+        goal = embedding[:, :, -5:-2].reshape(batch_size * n_agents, -1)
+        act = embedding[:, :, -2:].reshape(batch_size * n_agents, -1)
+        laser = laser.unsqueeze(1)
+
+        l = F.leaky_relu(self.cnn1(laser))
+        l = F.leaky_relu(self.cnn2(l))
+        l = F.leaky_relu(self.cnn3(l))
+        l = l.flatten(start_dim=1)
+        g = F.leaky_relu(self.goal_embed(goal))
+        a = F.leaky_relu(self.action_embed(act))
+        s = torch.concat((l, g, a), dim=-1)
+        embedding = F.leaky_relu(self.layer_1(s))
+
+        embedding = embedding.view(batch_size, n_agents, self.embedding_dim)
+        # Build all ordered pairs (i, j), j != i, of agent embeddings
+        input_hard = []
+        for i in range(n_agents):
+            h_i = embedding[:, i]
+            h_hard_i = []
+            for j in range(n_agents):
+                if j != i:
+                    h_hard_i.append(torch.cat([h_i, embedding[:, j]], dim=-1))
+            h_hard_i = torch.stack(h_hard_i, dim=0)
+            input_hard.append(h_hard_i)
+
+        input_hard = torch.stack(input_hard, dim=-2)
+        input_hard = input_hard.permute(2, 0, 1, 3)  # (batch_size, n_agents, n_agents - 1, embedding_dim * 2)
+        input_hard = input_hard.reshape(batch_size * n_agents, n_agents - 1, self.embedding_dim * 2)
+
+        input_hard_reshaped = input_hard.permute(1, 0, 2)  # (n_agents - 1, batch_size * n_agents, embedding_dim * 2)
+        h_hard = self.hard_mlp(input_hard_reshaped)  # apply the MLP to each pair
+        h_hard = h_hard.reshape(-1,
+                                self.embedding_dim)  # (batch_size * n_agents * (n_agents - 1), embedding_dim)
+
+        hard_weights = self.hard_encoding(h_hard)
+        hard_weights = F.gumbel_softmax(hard_weights, tau=0.01)
+        hard_weights = hard_weights[:, 1].view(-1, n_agents, 1, n_agents - 1)
+        hard_weights = hard_weights.permute(1, 0, 2, 3)
+
+        embed = embedding.reshape(batch_size * n_agents, self.embedding_dim)
+        q = self.q(embed).reshape(batch_size, n_agents, self.embedding_dim)  # (batch_size, n_agents, embedding_dim)
+        k = self.k(embed).reshape(batch_size, n_agents, self.embedding_dim)  # (batch_size, n_agents, embedding_dim)
+        v = F.relu(self.v(embed)).reshape(batch_size, n_agents, self.embedding_dim)  # (batch_size, n_agents, embedding_dim)
+        x = []
+
+        for i in range(n_agents):
+            q_i = q[:, i].view(-1, 1, self.embedding_dim)  # agent i's query, (batch_size, 1, embedding_dim)
+            k_i = [k[:, j] for j in range(n_agents) if j != i]  # keys of all other agents, from agent i's view
+            v_i = [v[:, j] for j in range(n_agents) if j != i]  # values of all other agents, from agent i's view
+
+            k_i = torch.stack(k_i, dim=0)  # (n_agents - 1, batch_size, embedding_dim)
+            k_i = k_i.permute(1, 2, 0)  # reorder to (batch_size, embedding_dim, n_agents - 1)
+            v_i = torch.stack(v_i, dim=0)
+            v_i = v_i.permute(1, 2, 0)
+
+            score = torch.matmul(q_i, k_i)
+
+            scaled_score = score / np.sqrt(self.embedding_dim)
+
+            soft_weight = F.softmax(scaled_score, dim=-1)  # (batch_size, 1, n_agents - 1)
+
+            x_i = (v_i * soft_weight * hard_weights[i]).sum(dim=-1)
+
+            x.append(x_i)
+
+        x = torch.stack(x, dim=1).reshape(-1, self.embedding_dim)
+        final_input = torch.cat([embed, x], dim=-1)
+
+        s = F.leaky_relu(self.decode_1(final_input))
+        s = 
F.leaky_relu(self.decode_2(s)) + a = self.tanh(self.decode_3(s)) + + return a + + + + class Actor(nn.Module): """ Actor network for the CNNTD3 agent. @@ -39,7 +295,7 @@ def __init__(self, action_dim): self.goal_embed = nn.Linear(3, 10) self.action_embed = nn.Linear(2, 10) - self.layer_1 = nn.Linear(36, 400) + self.layer_1 = nn.Linear(24, 400) torch.nn.init.kaiming_uniform_(self.layer_1.weight, nonlinearity="leaky_relu") self.layer_2 = nn.Linear(400, 300) torch.nn.init.kaiming_uniform_(self.layer_2.weight, nonlinearity="leaky_relu") @@ -110,7 +366,7 @@ def __init__(self, action_dim): self.goal_embed = nn.Linear(3, 10) self.action_embed = nn.Linear(2, 10) - self.layer_1 = nn.Linear(36, 400) + self.layer_1 = nn.Linear(24, 400) torch.nn.init.kaiming_uniform_(self.layer_1.weight, nonlinearity="leaky_relu") self.layer_2_s = nn.Linear(400, 300) torch.nn.init.kaiming_uniform_(self.layer_2_s.weight, nonlinearity="leaky_relu") @@ -119,7 +375,7 @@ def __init__(self, action_dim): self.layer_3 = nn.Linear(300, 1) torch.nn.init.kaiming_uniform_(self.layer_3.weight, nonlinearity="leaky_relu") - self.layer_4 = nn.Linear(36, 400) + self.layer_4 = nn.Linear(24, 400) torch.nn.init.kaiming_uniform_(self.layer_1.weight, nonlinearity="leaky_relu") self.layer_5_s = nn.Linear(400, 300) torch.nn.init.kaiming_uniform_(self.layer_5_s.weight, nonlinearity="leaky_relu") @@ -142,9 +398,9 @@ def forward(self, s, action): - q1 (torch.Tensor): First Q-value estimate (batch_size, 1). - q2 (torch.Tensor): Second Q-value estimate (batch_size, 1). """ - laser = s[:, :-5] - goal = s[:, -5:-2] - act = s[:, -2:] + laser = s[:, :-7] + goal = s[:, -7:-4] + act = s[:, -4:-2] laser = laser.unsqueeze(1) l = F.leaky_relu(self.cnn1(laser)) @@ -208,6 +464,7 @@ def __init__( action_dim, max_action, device, + num_robots, lr=1e-4, save_every=0, load_model=False, @@ -218,9 +475,12 @@ def __init__( bound_weight=0.25, ): # Initialize the Actor network + self.num_robots = num_robots self.device = device - self.actor = Actor(action_dim).to(self.device) - self.actor_target = Actor(action_dim).to(self.device) + # self.actor = Actor(action_dim).to(self.device) + # self.actor_target = Actor(action_dim).to(self.device) + self.actor = Attention(48).to(self.device) + self.actor_target = Attention(48).to(self.device) self.actor_target.load_state_dict(self.actor.state_dict()) self.actor_optimizer = torch.optim.Adam(params=self.actor.parameters(), lr=lr) @@ -230,6 +490,8 @@ def __init__( self.critic_target.load_state_dict(self.critic.state_dict()) self.critic_optimizer = torch.optim.Adam(params=self.critic.parameters(), lr=lr) + # self.attention = HardAttention(48).to(self.device) + self.action_dim = action_dim self.max_action = max_action self.state_dim = state_dim @@ -254,12 +516,13 @@ def get_action(self, obs, add_noise): Returns: (np.ndarray): The selected action. 
""" + action, connection = self.act(obs) if add_noise: return ( - self.act(obs) + np.random.normal(0, 0.2, size=self.action_dim) + action + np.random.normal(0, 0.2, size=self.action_dim) ).clip(-self.max_action, self.max_action) else: - return self.act(obs) + return action.reshape(-1, 2), connection def act(self, state): """ @@ -273,7 +536,9 @@ def act(self, state): """ # Function to get the action from the actor state = torch.Tensor(state).to(self.device) - return self.actor(state).cpu().data.numpy().flatten() + # res = self.attention(state) + action, connection = self.actor(state) + return action.cpu().data.numpy().flatten(), connection # training cycle def train( @@ -322,21 +587,23 @@ def train( batch_dones, batch_next_states, ) = replay_buffer.sample_batch(batch_size) - state = torch.Tensor(batch_states).to(self.device) - next_state = torch.Tensor(batch_next_states).to(self.device) - action = torch.Tensor(batch_actions).to(self.device) - reward = torch.Tensor(batch_rewards).to(self.device) - done = torch.Tensor(batch_dones).to(self.device) + state = torch.Tensor(batch_states).to(self.device).view(-1, self.state_dim) + next_state = torch.Tensor(batch_next_states).to(self.device).view(-1, self.state_dim) + action = torch.Tensor(batch_actions).to(self.device).view(-1, 2) + reward = torch.Tensor(batch_rewards).to(self.device).view(-1, 1) + done = torch.Tensor(batch_dones).to(self.device).view(-1, 1) + # Obtain the estimated action from the next state by using the actor-target - next_action = self.actor_target(next_state) + next_action, _ = self.actor_target(next_state) # Add noise to the action noise = ( torch.Tensor(batch_actions) .data.normal_(0, policy_noise) .to(self.device) - ) + ).reshape(-1, 2) + noise = noise.clamp(-noise_clip, noise_clip) next_action = (next_action + noise).clamp(-self.max_action, self.max_action) @@ -383,7 +650,8 @@ def train( if it % policy_freq == 0: # Maximize the actor output value by performing gradient descent on negative Q values # (essentially perform gradient ascent) - actor_grad, _ = self.critic(state, self.actor(state)) + actn, _ = self.actor(state) + actor_grad, _ = self.critic(state, actn) actor_grad = -actor_grad.mean() self.actor_optimizer.zero_grad() actor_grad.backward() @@ -457,7 +725,7 @@ def load(self, filename, directory): ) print(f"Loaded weights from: {directory}") - def prepare_state(self, latest_scan, distance, cos, sin, collision, goal, action): + def prepare_state(self, latest_scan, distance, cos, sin, collision, goal, action, positions): """ Prepares the environment's raw sensor data and navigation variables into a format suitable for learning. @@ -476,19 +744,30 @@ def prepare_state(self, latest_scan, distance, cos, sin, collision, goal, action - state (list): Normalized and concatenated state vector. - terminal (int): Terminal flag (1 if collision or goal, else 0). 
""" - latest_scan = np.array(latest_scan) + states = [] + terminal = [] + for i in range(self.num_robots ): + scan = np.array(latest_scan[i]) + dist = distance[i] + cos_i = cos[i] + sin_i = sin[i] + act = action[i] + + inf_mask = np.isinf(scan) + scan[inf_mask] = 7.0 + scan /= 7 + + # Normalize to [0, 1] range + dist /= 10 + lin_vel = act[0] * 2 + ang_vel = (act[1] + 1) / 2 - inf_mask = np.isinf(latest_scan) - latest_scan[inf_mask] = 7.0 - latest_scan /= 7 + position = positions[i] + state = scan.tolist() + [dist, cos_i, sin_i] + [lin_vel, ang_vel] + position - # Normalize to [0, 1] range - distance /= 10 - lin_vel = action[0] * 2 - ang_vel = (action[1] + 1) / 2 - state = latest_scan.tolist() + [distance, cos, sin] + [lin_vel, ang_vel] + assert len(state) == self.state_dim + states.append(state) - assert len(state) == self.state_dim - terminal = 1 if collision or goal else 0 + terminal.append(collision[i]) - return state, terminal + return states, terminal diff --git a/robot_nav/models/CNNTD3/att.py b/robot_nav/models/CNNTD3/att.py new file mode 100644 index 0000000..ea6eef2 --- /dev/null +++ b/robot_nav/models/CNNTD3/att.py @@ -0,0 +1,614 @@ +from pathlib import Path + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from numpy import inf +from torch.utils.tensorboard import SummaryWriter + +from robot_nav.utils import get_max_bound + + +class Attention(nn.Module): + def __init__(self, embedding_dim): + super(Attention, self).__init__() + self.embedding_dim = embedding_dim + + # CNN for laser scan + self.embedding1= nn.Linear(5, 128) + nn.init.kaiming_uniform_(self.embedding1.weight, nonlinearity="leaky_relu") + self.embedding2 = nn.Linear(128, embedding_dim) + nn.init.kaiming_uniform_(self.embedding2.weight, nonlinearity="leaky_relu") + + + # Hard attention MLP with distance + self.hard_mlp = nn.Sequential( + nn.Linear(embedding_dim + 7, embedding_dim), + nn.ReLU(), + nn.Linear(embedding_dim, embedding_dim) + ) + self.hard_encoding = nn.Linear(embedding_dim, 2) + + # Soft attention projections + self.q = nn.Linear(embedding_dim, embedding_dim, bias=False) + self.k = nn.Linear(embedding_dim, embedding_dim, bias=False) + self.v = nn.Linear(embedding_dim, embedding_dim) + + # Soft attention score network (with distance) + self.attn_score_layer = nn.Sequential( + nn.Linear(embedding_dim + 7, embedding_dim), + nn.ReLU(), + nn.Linear(embedding_dim, 1) + ) + + self.v_proj = nn.Linear(7, embedding_dim) + # Decoder + self.decode_1 = nn.Linear(embedding_dim * 2, embedding_dim * 2) + nn.init.kaiming_uniform_(self.decode_1.weight, nonlinearity="leaky_relu") + self.decode_2 = nn.Linear(embedding_dim * 2, embedding_dim * 2) + nn.init.kaiming_uniform_(self.decode_2.weight, nonlinearity="leaky_relu") + + def encode_agent_features(self, embed): + embed = F.leaky_relu(self.embedding1(embed)) + embed = F.leaky_relu(self.embedding2(embed)) + return embed + + def forward(self, embedding): + if embedding.dim() == 2: + embedding = embedding.unsqueeze(0) + batch_size, n_agents, _ = embedding.shape + + embed = embedding[:, :, 4:].reshape(batch_size * n_agents, -1) + position = embedding[:, :, :2].reshape(batch_size, n_agents, 2) + heading = embedding[:, :, 2:4].reshape(batch_size, n_agents, 2) # assume (cos(θ), sin(θ)) + action = embedding[:, :, -2:].reshape(batch_size, n_agents, 2) + + agent_embed = self.encode_agent_features(embed) + agent_embed = agent_embed.view(batch_size, n_agents, self.embedding_dim) + + # For hard attention + h_i = agent_embed.unsqueeze(2) # (B, 
N, 1, D) + pos_i = position.unsqueeze(2) # (B, N, 1, 2) + pos_j = position.unsqueeze(1) # (B, 1, N, 2) + heading_i = heading.unsqueeze(2) # (B, N, 1, 2) + heading_j = heading.unsqueeze(1).expand(-1, n_agents, -1, -1) # (B, 1, N, 2) + + # Compute relative vectors and distance + rel_vec = pos_j - pos_i # (B, N, N, 2) + dx, dy = rel_vec[..., 0], rel_vec[..., 1] + rel_dist = torch.linalg.vector_norm(rel_vec, dim=-1, keepdim=True) # (B, N, N, 1) + + # Relative angle in agent i's frame + angle = torch.atan2(dy, dx) - torch.atan2(heading_i[..., 1], heading_i[..., 0]) + angle = (angle + np.pi) % (2 * np.pi) - np.pi + rel_angle_sin = torch.sin(angle) + rel_angle_cos = torch.cos(angle) + + # Other agent's heading + heading_j_cos = heading_j[..., 0] # (B, 1, N) + heading_j_sin = heading_j[..., 1] # (B, 1, N) + + # Stack edge features + edge_features = torch.cat([ + rel_dist, # (B, N, N, 1) + rel_angle_cos.unsqueeze(-1), # (B, N, N, 1) + rel_angle_sin.unsqueeze(-1), # (B, N, N, 1) + heading_j_cos.unsqueeze(-1), # (B, N, N, 1) + heading_j_sin.unsqueeze(-1), # (B, N, N, 1) + action.unsqueeze(1).expand(-1, n_agents, -1, -1) # (B, N, N, 2) + ], dim=-1) # (B, N, N, 7) + + # Broadcast h_i along N (for each pair) + h_i_expanded = h_i.expand(-1, -1, n_agents, -1) # (B, N, N, D) + + # Remove self-pairs using mask + mask = ~torch.eye(n_agents, dtype=torch.bool, device=embedding.device) + h_i_flat = h_i_expanded[:, mask].reshape(batch_size * n_agents, n_agents - 1, self.embedding_dim) + edge_flat = edge_features[:, mask].reshape(batch_size * n_agents, n_agents - 1, -1) + + # Concatenate agent embedding and edge features + hard_input = torch.cat([h_i_flat, edge_flat], dim=-1) # (B*N, N-1, D+7) + + # Hard attention forward + h_hard = self.hard_mlp(hard_input) + hard_logits = self.hard_encoding(h_hard) + hard_weights = F.gumbel_softmax(hard_logits, hard=False, tau=0.5, dim=-1)[..., 1].unsqueeze(2) + hard_weights = hard_weights.view(batch_size, n_agents, n_agents - 1) + + unnorm_rel_vec = rel_vec * 12 + unnorm_rel_dist = torch.linalg.vector_norm(unnorm_rel_vec, dim=-1, keepdim=True) + unnorm_rel_dist = unnorm_rel_dist[:, mask].reshape(batch_size * n_agents, n_agents - 1, 1) + + # Soft attention + q = self.q(agent_embed) + + attention_outputs = [] + entropy_list = [] + combined_w = [] + for i in range(n_agents): + q_i = q[:, i:i + 1, :] # (B, 1, D) + mask = torch.ones(n_agents, dtype=torch.bool, device=edge_features.device) + mask[i] = False + edge_i_wo_self = edge_features[:, i, mask, :] + edge_i_wo_self = edge_i_wo_self.squeeze(1) # (B, N-1, 7) + + q_i_expanded = q_i.expand(-1, n_agents - 1, -1) # (B, N-1, D) + attention_input = torch.cat([q_i_expanded, edge_i_wo_self], dim=-1) # (B, N-1, D+7) + + # Score computation + scores = self.attn_score_layer(attention_input).transpose(1, 2) # (B, 1, N-1) + + # Mask using hard weights + h_weights = hard_weights[:, i].unsqueeze(1) # (B, 1, N-1) + mask = (h_weights > 0.5).float() + + # All-zero mask handling + epsilon = 1e-6 + mask_sum = mask.sum(dim=-1, keepdim=True) + all_zero_mask = (mask_sum < epsilon) + + # Apply mask to scores + masked_scores = scores.masked_fill(mask == 0, float('-inf')) + + # Compute softmax, safely handle all-zero cases + soft_weights = F.softmax(masked_scores, dim=-1) + soft_weights = torch.where(all_zero_mask, torch.zeros_like(soft_weights), soft_weights) + + combined_weights = soft_weights * mask # (B, 1, N-1) + combined_w.append(combined_weights) + + # Normalize combined_weights for entropy calculation + combined_weights_norm = combined_weights / 
(combined_weights.sum(dim=-1, keepdim=True) + epsilon) + + # Calculate entropy from combined_weights + entropy = -(combined_weights_norm * (combined_weights_norm + epsilon).log()).sum(dim=-1).mean() + entropy_list.append(entropy) + + # Project each other agent's features to embedding dim *before* the attention-weighted sum + v_j = self.v_proj(edge_i_wo_self) # (B, N-1, embedding_dim) + attn_output = torch.bmm(combined_weights, v_j).squeeze(1) # (B, embedding_dim) + attention_outputs.append(attn_output) + + comb_w = torch.stack(combined_w, dim=1).reshape(n_agents, -1) + attn_stack = torch.stack(attention_outputs, dim=1).reshape(-1, self.embedding_dim) + self_embed = agent_embed.reshape(-1, self.embedding_dim) + concat_embed = torch.cat([self_embed, attn_stack], dim=-1) + + x = F.leaky_relu(self.decode_1(concat_embed)) + att_embedding = F.leaky_relu(self.decode_2(x)) + + mean_entropy = torch.stack(entropy_list).mean() + + return att_embedding, hard_logits[..., 1], unnorm_rel_dist, mean_entropy, hard_weights, comb_w + + +class Actor(nn.Module): + def __init__(self, action_dim, embedding_dim): + super().__init__() + self.attention = Attention(embedding_dim) # ➊ edge classifier + + # ➋ policy head (everything _after_ attention) + self.policy_head = nn.Sequential( + nn.Linear(embedding_dim * 2, 400), + nn.LeakyReLU(), + nn.Linear(400, 300), + nn.LeakyReLU(), + nn.Linear(300, action_dim), + nn.Tanh(), + ) + + def forward(self, obs, detach_attn=False): + attn_out, hard_logits, pair_d, mean_entropy, hard_weights, combined_weights = self.attention(obs) + if detach_attn: # used in the policy phase + attn_out = attn_out.detach() + action = self.policy_head(attn_out) + return action, hard_logits, pair_d, mean_entropy, hard_weights, combined_weights + + +class Critic(nn.Module): + def __init__(self, action_dim, embedding_dim): + super(Critic, self).__init__() + self.embedding_dim = embedding_dim + self.attention = Attention(self.embedding_dim) + + self.layer_1 = nn.Linear(self.embedding_dim * 2, 400) + torch.nn.init.kaiming_uniform_(self.layer_1.weight, nonlinearity="leaky_relu") + + self.layer_2_s = nn.Linear(400, 300) + torch.nn.init.kaiming_uniform_(self.layer_2_s.weight, nonlinearity="leaky_relu") + + self.layer_2_a = nn.Linear(action_dim, 300) + torch.nn.init.kaiming_uniform_(self.layer_2_a.weight, nonlinearity="leaky_relu") + + self.layer_3 = nn.Linear(300, 1) + torch.nn.init.kaiming_uniform_(self.layer_3.weight, nonlinearity="leaky_relu") + + self.layer_4 = nn.Linear(self.embedding_dim * 2, 400) + torch.nn.init.kaiming_uniform_(self.layer_4.weight, nonlinearity="leaky_relu") # ✅ Fixed init bug + + self.layer_5_s = nn.Linear(400, 300) + torch.nn.init.kaiming_uniform_(self.layer_5_s.weight, nonlinearity="leaky_relu") + + self.layer_5_a = nn.Linear(action_dim, 300) + torch.nn.init.kaiming_uniform_(self.layer_5_a.weight, nonlinearity="leaky_relu") + + self.layer_6 = nn.Linear(300, 1) + torch.nn.init.kaiming_uniform_(self.layer_6.weight, nonlinearity="leaky_relu") + + def forward(self, embedding, action): + + embedding_with_attention, hard_logits, unnorm_rel_dist, mean_entropy, hard_weights, _ = self.attention(embedding) + + # Q1 + s1 = F.leaky_relu(self.layer_1(embedding_with_attention)) + s1 = F.leaky_relu(self.layer_2_s(s1) + self.layer_2_a(action)) # ✅ No .data + q1 = self.layer_3(s1) + + # Q2 + s2 = F.leaky_relu(self.layer_4(embedding_with_attention)) + s2 = F.leaky_relu(self.layer_5_s(s2) + self.layer_5_a(action)) # ✅ No .data + q2 = self.layer_6(s2) + + return q1, q2, mean_entropy, hard_logits, 
unnorm_rel_dist, hard_weights + + + +# CNNTD3 network +class CNNTD3(object): + """ + CNNTD3 (Twin Delayed Deep Deterministic Policy Gradient with CNN-based inputs) agent for + continuous control tasks. + + This class encapsulates the full implementation of the TD3 algorithm using neural network + architectures for the actor and critic, with optional bounding for critic outputs to + regularize learning. The agent is designed to train in environments where sensor + observations (e.g., LiDAR) are used for navigation tasks. + + Args: + state_dim (int): Dimension of the input state. + action_dim (int): Dimension of the output action. + max_action (float): Maximum magnitude of the action. + device (torch.device): Torch device to use (CPU or GPU). + lr (float): Learning rate for both actor and critic optimizers. + save_every (int): Save model every N training iterations (0 to disable). + load_model (bool): Whether to load a pre-trained model at initialization. + save_directory (Path): Path to the directory for saving model checkpoints. + model_name (str): Base name for the saved model files. + load_directory (Path): Path to load model checkpoints from (if `load_model=True`). + use_max_bound (bool): Whether to apply maximum Q-value bounding during training. + bound_weight (float): Weight for the bounding loss term in total loss. + """ + + def __init__( + self, + state_dim, + action_dim, + max_action, + device, + num_robots, + lr_actor=1e-4, + lr_critic=3e-4, + save_every=0, + load_model=False, + save_directory=Path("robot_nav/models/CNNTD3/checkpoint"), + model_name="CNNTD3", + load_model_name = None, + load_directory=Path("robot_nav/models/CNNTD3/checkpoint"), + use_max_bound=False, + bound_weight=0.25, + ): + # Initialize the Actor network + self.num_robots = num_robots + self.device = device + self.actor = Actor(action_dim, embedding_dim=256).to(self.device) # Using the updated Actor + self.actor_target = Actor(action_dim, embedding_dim=256).to(self.device) + self.actor_target.load_state_dict(self.actor.state_dict()) + # self.actor_optimizer = torch.optim.Adam(params=self.actor.parameters(), lr=lr_actor) + + self.attn_params = list(self.actor.attention.parameters()) + self.policy_params = list(self.actor.policy_head.parameters()) + + # self.attn_opt = torch.optim.Adam(self.attn_params, lr=1e-3) # edge classifier + self.actor_optimizer = torch.optim.Adam(self.policy_params + self.attn_params, lr=lr_actor) # TD3 policy + + self.critic = Critic(action_dim, embedding_dim=256).to(self.device) # Using the updated Critic + self.critic_target = Critic(action_dim, embedding_dim=256).to(self.device) + self.critic_target.load_state_dict(self.critic.state_dict()) + self.critic_optimizer = torch.optim.Adam(params=self.critic.parameters(), lr=lr_critic) + self.action_dim = action_dim + self.max_action = max_action + self.state_dim = state_dim + self.writer = SummaryWriter(comment=model_name) + self.iter_count = 0 + if load_model_name is None: + load_model_name = model_name + if load_model: + self.load(filename=load_model_name, directory=load_directory) + self.save_every = save_every + self.model_name = model_name + self.save_directory = save_directory + self.use_max_bound = use_max_bound + self.bound_weight = bound_weight + + def get_action(self, obs, add_noise): + """ + Selects an action for a given observation. + + Args: + obs (np.ndarray): The current observation/state. + add_noise (bool): Whether to add exploration noise to the action. + + Returns: + (np.ndarray): The selected action. 
+ """ + action, connection, combined_weights = self.act(obs) + if add_noise: + action = (action + np.random.normal(0, 0.1, size=action.shape) + ).clip(-self.max_action, self.max_action) + + return action.reshape(-1, 2), connection, combined_weights + + def act(self, state): + """ + Computes the deterministic action from the actor network for a given state. + + Args: + state (np.ndarray): Input state. + + Returns: + (np.ndarray): Action predicted by the actor network. + """ + # Function to get the action from the actor + state = torch.Tensor(state).to(self.device) + # res = self.attention(state) + action, connection, _, _, _, combined_weights = self.actor(state) + return action.cpu().data.numpy().flatten(), connection, combined_weights + + # training cycle + def train( + self, + replay_buffer, + iterations, + batch_size, + discount=0.99, + tau=0.005, + policy_noise=0.2, + noise_clip=0.5, + policy_freq=2, + max_lin_vel=0.5, + max_ang_vel=1, + goal_reward=100, + distance_norm=10, + time_step=0.3, + ): + av_Q = 0 + max_Q = -inf + av_loss = 0 + av_critic_loss = 0 + av_critic_entropy = [] + av_actor_entropy = [] + av_actor_loss = 0 + av_critic_bce_loss = [] + av_actor_bce_loss = [] + + for it in range(iterations): + # sample a batch + ( + batch_states, + batch_actions, + batch_rewards, + batch_dones, + batch_next_states, + ) = replay_buffer.sample_batch(batch_size) + + state = torch.Tensor(batch_states).to(self.device).view(batch_size, self.num_robots, self.state_dim) + next_state = torch.Tensor(batch_next_states).to(self.device).view(batch_size, self.num_robots, self.state_dim) + action = torch.Tensor(batch_actions).to(self.device).view(batch_size * self.num_robots, self.action_dim) + reward = torch.Tensor(batch_rewards).to(self.device).view(batch_size * self.num_robots, 1) + done = torch.Tensor(batch_dones).to(self.device).view(batch_size * self.num_robots, 1) + + with torch.no_grad(): + next_action, _, _, _, _, _ = self.actor_target(next_state, detach_attn=True) + + # --- Target smoothing --- + noise = ( + torch.Tensor(batch_actions) + .data.normal_(0, policy_noise) + .to(self.device) + ).reshape(-1, 2) + noise = noise.clamp(-noise_clip, noise_clip) + next_action = (next_action + noise).clamp(-self.max_action, self.max_action) + + # --- Target Q values --- + target_Q1, target_Q2, _, _, _, _ = self.critic_target(next_state, next_action) + target_Q = torch.min(target_Q1, target_Q2) + av_Q += target_Q.mean() + max_Q = max(max_Q, target_Q.max().item()) + target_Q = reward + ((1 - done) * discount * target_Q).detach() + + # --- Critic update --- + current_Q1, current_Q2, mean_entropy, hard_logits, unnorm_rel_dist, hard_weights = self.critic(state, action) + critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q) + + proximity_threshold = 4 # You may need to adjust this + targets = (unnorm_rel_dist.flatten() < proximity_threshold).float() + flat_logits = hard_logits.flatten() + bce_loss = F.binary_cross_entropy_with_logits(flat_logits, targets) + # masked_weights = hard_weights.flatten()[mask] + # target = torch.ones_like(masked_weights) + # num_pos = masked_weights.numel() + # if num_pos > 0: + # bce_loss = F.binary_cross_entropy(masked_weights, target, reduction='sum') / num_pos + # else: + # bce_loss = torch.tensor(0.0, device=masked_weights.device) + + bce_weight = 0.1 + av_critic_bce_loss.append(bce_loss) + + critic_entropy_weight = 1 # or tuneable + total_loss = critic_loss - critic_entropy_weight * mean_entropy + bce_weight * bce_loss + 
av_critic_entropy.append(mean_entropy) + + self.critic_optimizer.zero_grad() + total_loss.backward() + torch.nn.utils.clip_grad_norm_(self.critic.parameters(), 10.0) + self.critic_optimizer.step() + + av_loss += total_loss.item() + av_critic_loss += critic_loss.item() + + # --- Actor update --- + if it % policy_freq == 0: + + action, hard_logits, unnorm_rel_dist, mean_entropy, hard_weights, _ = self.actor(state, detach_attn=False) + targets = (unnorm_rel_dist.flatten() < proximity_threshold).float() + flat_logits = hard_logits.flatten() + bce_loss = F.binary_cross_entropy_with_logits(flat_logits, targets) + # masked_weights = hard_weights.flatten()[mask] + # target = torch.ones_like(masked_weights) + # num_pos = masked_weights.numel() + # if num_pos > 0: + # bce_loss = F.binary_cross_entropy(masked_weights, target, reduction='sum') / num_pos + # else: + # bce_loss = torch.tensor(0.0, device=masked_weights.device) + + bce_weight = 0.1 + av_actor_bce_loss.append(bce_loss) + + actor_Q, _, _, _, _, _ = self.critic(state, action) + actor_loss = -actor_Q.mean() + actor_entropy_weight = 0.05 + total_loss = actor_loss - actor_entropy_weight * mean_entropy + bce_weight * bce_loss + av_actor_entropy.append(mean_entropy) + + self.actor_optimizer.zero_grad() + total_loss.backward() + torch.nn.utils.clip_grad_norm_(self.policy_params, 10.0) + self.actor_optimizer.step() + + av_actor_loss += total_loss.item() + + # Soft update target networks + for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()): + target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data) + + for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()): + target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data) + + self.iter_count += 1 + self.writer.add_scalar("train/loss_total", av_loss / iterations, self.iter_count) + self.writer.add_scalar("train/critic_loss", av_critic_loss / iterations, self.iter_count) + self.writer.add_scalar("train/av_critic_entropy", sum(av_critic_entropy) / len(av_critic_entropy), self.iter_count) + self.writer.add_scalar("train/av_actor_entropy", sum(av_actor_entropy) / len(av_actor_entropy), + self.iter_count) + self.writer.add_scalar("train/av_critic_bce_loss", sum(av_critic_bce_loss) / len(av_critic_bce_loss), + self.iter_count) + self.writer.add_scalar("train/av_actor_bce_loss", sum(av_actor_bce_loss) / len(av_actor_bce_loss), + self.iter_count) + self.writer.add_scalar("train/avg_Q", av_Q / iterations, self.iter_count) + self.writer.add_scalar("train/max_Q", max_Q, self.iter_count) + + self.writer.add_scalar("train/actor_loss", av_actor_loss / (iterations // policy_freq), self.iter_count) + + if self.save_every > 0 and self.iter_count % self.save_every == 0: + self.save(filename=self.model_name, directory=self.save_directory) + + def save(self, filename, directory): + """ + Saves the current model parameters to the specified directory. + + Args: + filename (str): Base filename for saved files. + directory (Path): Path to save the model files. 
+ """ + Path(directory).mkdir(parents=True, exist_ok=True) + torch.save(self.actor.state_dict(), "%s/%s_actor.pth" % (directory, filename)) + torch.save( + self.actor_target.state_dict(), + "%s/%s_actor_target.pth" % (directory, filename), + ) + torch.save(self.critic.state_dict(), "%s/%s_critic.pth" % (directory, filename)) + torch.save( + self.critic_target.state_dict(), + "%s/%s_critic_target.pth" % (directory, filename), + ) + + def load(self, filename, directory): + """ + Loads model parameters from the specified directory. + + Args: + filename (str): Base filename for saved files. + directory (Path): Path to load the model files from. + """ + self.actor.load_state_dict( + torch.load("%s/%s_actor.pth" % (directory, filename)) + ) + self.actor_target.load_state_dict( + torch.load("%s/%s_actor_target.pth" % (directory, filename)) + ) + self.critic.load_state_dict( + torch.load("%s/%s_critic.pth" % (directory, filename)) + ) + self.critic_target.load_state_dict( + torch.load("%s/%s_critic_target.pth" % (directory, filename)) + ) + print(f"Loaded weights from: {directory}") + + def prepare_state(self, poses, distance, cos, sin, collision, goal, action, positions, goal_positions): + """ + Prepares the environment's raw agent state for learning. + + Args: + poses (list): Each agent's global pose [x, y, theta]. + distance, cos, sin: Unused, can be removed or ignored. + collision (list): Collision flags per agent. + goal (list): Goal reached flags per agent. + action (list): Last action taken [lin_vel, ang_vel]. + positions (list): Extra features (e.g., neighbors). + goal_positions (list): Each agent's goal [x, y]. + + Returns: + states (list): List of processed state vectors. + terminal (list): Flags (1 if collision or goal reached, else 0). + """ + states = [] + terminal = [] + + for i in range(self.num_robots): + pose = poses[i] # [x, y, theta] + goal_pos = goal_positions[i] # [goal_x, goal_y] + act = action[i] # [lin_vel, ang_vel] + + px, py, theta = pose + gx, gy = goal_pos + + # Global position (keep for boundary awareness) + x = px / 12 + y = py / 12 + + # Relative goal position in local frame + dx = gx - px + dy = gy - py + rel_gx = dx * np.cos(theta) + dy * np.sin(theta) + rel_gy = -dx * np.sin(theta) + dy * np.cos(theta) + rel_gx /= 12 + rel_gy /= 12 + + # Heading as cos/sin + heading_cos = np.cos(theta) + heading_sin = np.sin(theta) + + # Last velocity + lin_vel = act[0] * 2 # Assuming original range [-0.5, 0.5] + ang_vel = (act[1] + 1) / 2 # Assuming original range [-1, 1] + + # Final state vector + state = [x, y, heading_cos, heading_sin, distance[i]/17, cos[i], sin[i], lin_vel, ang_vel] + + assert len(state) == self.state_dim, f"State length mismatch: expected {self.state_dim}, got {len(state)}" + states.append(state) + terminal.append(collision[i]) + + return states, terminal diff --git a/robot_nav/models/TD3/TD3.py b/robot_nav/models/TD3/TD3.py index c35943b..5bdbf16 100644 --- a/robot_nav/models/TD3/TD3.py +++ b/robot_nav/models/TD3/TD3.py @@ -445,7 +445,7 @@ def prepare_state(self, latest_scan, distance, cos, sin, collision, goal, action ang_vel = (action[1] + 1) / 2 state = min_values + [distance, cos, sin] + [lin_vel, ang_vel] - assert len(state) == self.state_dim + assert len(state) == self.state_dim, f"{len(state), self.state_dim}" terminal = 1 if collision or goal else 0 return state, terminal diff --git a/robot_nav/multi_robot_world.yaml b/robot_nav/multi_robot_world.yaml new file mode 100644 index 0000000..b62285f --- /dev/null +++ b/robot_nav/multi_robot_world.yaml 
@@ -0,0 +1,58 @@
+world:
+  height: 12  # the height of the world
+  width: 12   # the width of the world
+  step_time: 0.3  # 0.3 s per simulation step (~3.3 Hz)
+  sample_time: 0.3  # 0.3 s (~3.3 Hz) for render and data extraction
+  collision_mode: 'reactive'
+
+robot:
+  - number: 3
+    kinematics: {name: 'diff'}
+    distribution: {name: 'circle', radius: 4, center: [6, 6]}
+    shape: {name: 'circle', radius: 0.2}
+    vel_min: [ 0, -1.0 ]
+    vel_max: [ 1.0, 1.0 ]
+    state: [2, 2, 0, 0]
+    goal: [9, 9, 0]
+    color: ['royalblue', 'red', 'green', 'orange', 'purple']
+    arrive_mode: position
+    goal_threshold: 0.3
+
+    sensors:
+      - type: 'lidar2d'
+        range_min: 0
+        range_max: 7
+        angle_range: 3.14
+        number: 90
+        noise: True
+        std: 0.08
+        angle_std: 0.1
+        offset: [ 0, 0, 0 ]
+        alpha: 0.3
+
+    plot:
+      show_trajectory: False
+      show_sensor: False
+
+obstacle:
+  - number: 4
+    kinematics: {name: 'static'}
+    distribution: {name: 'random', range_low: [0, 0, -3.14], range_high: [12, 12, 3.14]}
+    behavior: {name: 'rvo', wander: True, range_low: [0, 0, -3.14], range_high: [12, 12, 3.14], vxmax: 0.2, vymax: 0.2, factor: 1.0}
+    vel_max: [0.2, 0.2]
+    vel_min: [-0.2, -0.2]
+    shape:
+      - {name: 'circle', radius: 1.0, random_shape: True}
+      - {name: 'polygon', random_shape: true, avg_radius_range: [0.5, 1.0], irregularity_range: [0, 0.4], spikeyness_range: [0, 0.4], num_vertices_range: [4, 6]}
+  - shape: {name: 'circle', radius: 0.8}  # radius
+    state: [ 5, 10, 1 ]
+    kinematics: { name: 'static' }
+  - shape: { name: 'rectangle', length: 1.0, width: 1.2 }  # length, width
+    state: [ 8, 5, 1 ]
+    kinematics: {name: 'static'}
+  - shape: { name: 'rectangle', length: 0.5, width: 2.1 }  # length, width
+    state: [ 1, 8, 1.3 ]
+    kinematics: {name: 'static'}
+  - shape: { name: 'linestring', vertices: [ [ 0, 0 ], [ 12, 0 ], [ 12, 12 ], [ 0, 12 ], [ 0, 0 ] ] }  # vertices
+    kinematics: {name: 'static'}
+    state: [ 0, 0, 0 ]
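For reference, a minimal sketch of how these world files are consumed (it mirrors the SIM_ENV constructor later in this patch; the print is illustrative only):

    import irsim
    env = irsim.make("multi_robot_world.yaml", disable_all_plot=False, display=True)
    print(len(env.robot_list))  # -> 3 robots, as configured above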
diff --git a/robot_nav/multi_train.py b/robot_nav/multi_train.py
new file mode 100644
index 0000000..97d34e1
--- /dev/null
+++ b/robot_nav/multi_train.py
@@ -0,0 +1,145 @@
+from robot_nav.models.TD3.TD3 import TD3
+from robot_nav.models.DDPG.DDPG import DDPG
+from robot_nav.models.SAC.SAC import SAC
+from robot_nav.models.HCM.hardcoded_model import HCM
+from robot_nav.models.PPO.PPO import PPO
+from robot_nav.models.CNNTD3.CNNTD3 import CNNTD3
+
+import torch
+import numpy as np
+from sim import SIM_ENV
+from utils import get_buffer
+
+
+def main(args=None):
+    """Main training function"""
+    action_dim = 2  # number of actions produced by the model
+    max_action = 1  # maximum absolute value of output actions
+    state_dim = 97  # number of input values in the neural network (vector length of state input)
+    device = torch.device(
+        "cuda" if torch.cuda.is_available() else "cpu"
+    )  # use cuda if it is available, cpu otherwise
+    nr_eval_episodes = 10  # how many episodes to use to run evaluation
+    max_epochs = 60  # max number of epochs
+    epoch = 0  # starting epoch number
+    episodes_per_epoch = 70  # how many episodes to run in a single epoch
+    episode = 0  # starting episode number
+    train_every_n = 2  # train and update network parameters every n episodes
+    training_iterations = 80  # how many batches to use in a single training cycle
+    batch_size = 12  # batch size for each training iteration
+    max_steps = 300  # maximum number of steps in a single episode
+    steps = 0  # starting step number
+    load_saved_buffer = False  # whether to load experiences from assets/data.yml
+    pretrain = False  # whether to use the loaded experiences to pre-train the model (load_saved_buffer must be True)
+    pretraining_iterations = (
+        10  # number of training iterations to run during pre-training
+    )
+    save_every = 5  # save the model every n training cycles
+
+    sim = SIM_ENV(world_file="multi_robot_world.yaml", disable_plotting=False)  # instantiate environment
+
+    model = CNNTD3(
+        state_dim=state_dim,
+        action_dim=action_dim,
+        max_action=max_action,
+        num_robots=sim.num_robots,
+        device=device,
+        save_every=save_every,
+        load_model=False,
+        model_name="CNNTD3",
+    )  # instantiate a model
+
+    replay_buffer = get_buffer(
+        model,
+        sim,
+        load_saved_buffer,
+        pretrain,
+        pretraining_iterations,
+        training_iterations,
+        batch_size,
+    )
+    con = torch.tensor([[[0, 0], [0, 0], [0, 0]]])
+
+    latest_scan, distance, cos, sin, collision, goal, a, reward, positions = sim.step(
+        [[0, 0] for _ in range(sim.num_robots)], con
+    )  # get the initial step state
+
+    while epoch < max_epochs:  # train until max_epochs is reached
+        state, terminal = model.prepare_state(
+            latest_scan, distance, cos, sin, collision, goal, a, positions
+        )  # build a state representation from the data returned by the environment
+
+        action, connection = model.get_action(np.array(state), False)  # get an action from the model
+
+        a_in = [[(a[0] + 1) / 4, a[1]] for a in action]  # rescale linear velocity from [-1, 1] to the [0, 0.5] m/s range
+
+        latest_scan, distance, cos, sin, collision, goal, a, reward, positions = sim.step(a_in, connection)  # get data from the environment
+        next_state, terminal = model.prepare_state(
+            latest_scan, distance, cos, sin, collision, goal, a, positions
+        )  # get the next state representation
+        replay_buffer.add(
+            state, action, reward, terminal, next_state
+        )  # add experience to the replay buffer
+
+        if (
+            any(terminal) or steps == max_steps
+        ):  # reset the environment if a terminal state was reached or max_steps were taken
+            latest_scan, distance, cos, sin, collision, goal, a, reward, positions = sim.reset()
+            episode += 1
+            if episode % train_every_n == 0:
+                model.train(
+                    replay_buffer=replay_buffer,
+                    iterations=training_iterations,
+                    batch_size=batch_size,
+                )  # train the model and update its parameters
+
+            steps = 0
+        else:
+            steps += 1
+
+        if (
+            episode + 1
+        ) % episodes_per_epoch == 0:  # if the epoch is concluded, run evaluation
+            episode = 0
+            epoch += 1
+            evaluate(model, epoch, sim, eval_episodes=nr_eval_episodes)
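+# Shape reference for the training loop above (a sketch; N = sim.num_robots,
+# S = state_dim): `state` is a list of N length-S vectors (S = 97 here),
+# `action` is an (N, 2) array of [linear, angular] commands in [-1, 1], and
+# `reward`/`terminal` are length-N lists. Each replay_buffer.add call thus
+# stores one joint multi-robot transition, which CNNTD3.train later reshapes
+# to per-robot rows before the attention pass.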
Evaluating scenarios") + avg_reward = 0.0 + col = 0 + goals = 0 + for _ in range(eval_episodes): + count = 0 + latest_scan, distance, cos, sin, collision, goal, a, reward, positions = sim.reset() + done = False + while not done and count < 501: + state, terminal = model.prepare_state( + latest_scan, distance, cos, sin, collision, goal, a, positions + ) + action, connection = model.get_action(np.array(state), False) + a_in = [[(a[0] + 1) / 4, a[1]] for a in action] + latest_scan, distance, cos, sin, collision, goal, a, reward, positions = sim.step(a_in, connection) + avg_reward += sum(reward)/len(reward) + count += 1 + if collision: + col += 1 + if goal: + goals += 1 + done = collision or goal + avg_reward /= eval_episodes + avg_col = col / eval_episodes + avg_goal = goals / eval_episodes + print(f"Average Reward: {avg_reward}") + print(f"Average Collision rate: {avg_col}") + print(f"Average Goal rate: {avg_goal}") + print("..............................................") + model.writer.add_scalar("eval/avg_reward", avg_reward, epoch) + model.writer.add_scalar("eval/avg_col", avg_col, epoch) + model.writer.add_scalar("eval/avg_goal", avg_goal, epoch) + + +if __name__ == "__main__": + main() diff --git a/robot_nav/multi_train2.py b/robot_nav/multi_train2.py new file mode 100644 index 0000000..63c8e8e --- /dev/null +++ b/robot_nav/multi_train2.py @@ -0,0 +1,154 @@ +from robot_nav.models.TD3.TD3 import TD3 +from robot_nav.models.DDPG.DDPG import DDPG +from robot_nav.models.SAC.SAC import SAC +from robot_nav.models.HCM.hardcoded_model import HCM +from robot_nav.models.PPO.PPO import PPO +from robot_nav.models.CNNTD3.att import CNNTD3 + +import torch +import numpy as np +from sim2 import SIM_ENV +from utils import get_buffer + +def outside_of_bounds(poses): + outside = False + for pose in poses: + norm_x = pose[0] - 6 + norm_y = pose[1] - 6 + if abs(norm_x) > 10.5 or abs(norm_y) > 10.5: + outside = True + break + return outside + +def main(args=None): + """Main training function""" + action_dim = 2 # number of actions produced by the model + max_action = 1 # maximum absolute value of output actions + state_dim = 9 # number of input values in the neural network (vector length of state input) + device = torch.device( + "cuda" if torch.cuda.is_available() else "cpu" + ) # using cuda if it is available, cpu otherwise + nr_eval_episodes = 10 # how many episodes to use to run evaluation + max_epochs = 100 # max number of epochs + epoch = 0 # starting epoch number + episodes_per_epoch = 70 # how many episodes to run in single epoch + episode = 0 # starting episode number + train_every_n = 10 # train and update network parameters every n episodes + training_iterations = 80 # how many batches to use for single training cycle + batch_size = 16 # batch size for each training iteration + max_steps = 300 # maximum number of steps in single episode + steps = 0 # starting step number + load_saved_buffer = False # whether to load experiences from assets/data.yml + pretrain = False # whether to use the loaded experiences to pre-train the model (load_saved_buffer must be True) + pretraining_iterations = ( + 10 # number of training iterations to run during pre-training + ) + save_every = 5 # save the model every n training cycles + + + + sim = SIM_ENV(world_file="multi_robot_world2.yaml",disable_plotting=False) # instantiate environment + + model = CNNTD3( + state_dim=state_dim, + action_dim=action_dim, + max_action=max_action, + num_robots=sim.num_robots, + device=device, + save_every=save_every, + 
diff --git a/robot_nav/multi_train2.py b/robot_nav/multi_train2.py
new file mode 100644
index 0000000..63c8e8e
--- /dev/null
+++ b/robot_nav/multi_train2.py
@@ -0,0 +1,154 @@
+from robot_nav.models.TD3.TD3 import TD3
+from robot_nav.models.DDPG.DDPG import DDPG
+from robot_nav.models.SAC.SAC import SAC
+from robot_nav.models.HCM.hardcoded_model import HCM
+from robot_nav.models.PPO.PPO import PPO
+from robot_nav.models.CNNTD3.att import CNNTD3
+
+import torch
+import numpy as np
+from sim2 import SIM_ENV
+from utils import get_buffer
+
+
+def outside_of_bounds(poses):
+    outside = False
+    for pose in poses:
+        norm_x = pose[0] - 6
+        norm_y = pose[1] - 6
+        if abs(norm_x) > 10.5 or abs(norm_y) > 10.5:
+            outside = True
+            break
+    return outside
+
+
+def main(args=None):
+    """Main training function"""
+    action_dim = 2  # number of actions produced by the model
+    max_action = 1  # maximum absolute value of output actions
+    state_dim = 9  # number of input values in the neural network (vector length of state input)
+    device = torch.device(
+        "cuda" if torch.cuda.is_available() else "cpu"
+    )  # use cuda if it is available, cpu otherwise
+    nr_eval_episodes = 10  # how many episodes to use to run evaluation
+    max_epochs = 100  # max number of epochs
+    epoch = 0  # starting epoch number
+    episodes_per_epoch = 70  # how many episodes to run in a single epoch
+    episode = 0  # starting episode number
+    train_every_n = 10  # train and update network parameters every n episodes
+    training_iterations = 80  # how many batches to use in a single training cycle
+    batch_size = 16  # batch size for each training iteration
+    max_steps = 300  # maximum number of steps in a single episode
+    steps = 0  # starting step number
+    load_saved_buffer = False  # whether to load experiences from assets/data.yml
+    pretrain = False  # whether to use the loaded experiences to pre-train the model (load_saved_buffer must be True)
+    pretraining_iterations = (
+        10  # number of training iterations to run during pre-training
+    )
+    save_every = 5  # save the model every n training cycles
+
+    sim = SIM_ENV(world_file="multi_robot_world2.yaml", disable_plotting=False)  # instantiate environment
+
+    model = CNNTD3(
+        state_dim=state_dim,
+        action_dim=action_dim,
+        max_action=max_action,
+        num_robots=sim.num_robots,
+        device=device,
+        save_every=save_every,
+        load_model=True,
+        model_name="CNNTD3",
+    )  # instantiate a model
+
+    replay_buffer = get_buffer(
+        model,
+        sim,
+        load_saved_buffer,
+        pretrain,
+        pretraining_iterations,
+        training_iterations,
+        batch_size,
+    )
+    con = torch.tensor([[0. for _ in range(sim.num_robots - 1)] for _ in range(sim.num_robots)])
+
+    poses, distance, cos, sin, collision, goal, a, reward, positions, goal_positions = sim.step(
+        [[0, 0] for _ in range(sim.num_robots)], con
+    )  # get the initial step state
+
+    while epoch < max_epochs:  # train until max_epochs is reached
+        state, terminal = model.prepare_state(
+            poses, distance, cos, sin, collision, goal, a, positions, goal_positions
+        )  # build a state representation from the data returned by the environment
+
+        action, connection, combined_weights = model.get_action(np.array(state), True)  # get an action from the model
+
+        a_in = [[(a[0] + 1) / 4, a[1]] for a in action]  # rescale linear velocity from [-1, 1] to the [0, 0.5] m/s range
+
+        poses, distance, cos, sin, collision, goal, a, reward, positions, goal_positions = sim.step(a_in, connection, combined_weights)  # get data from the environment
+        next_state, terminal = model.prepare_state(
+            poses, distance, cos, sin, collision, goal, a, positions, goal_positions
+        )  # get the next state representation
+        replay_buffer.add(
+            state, action, reward, terminal, next_state
+        )  # add experience to the replay buffer
+        outside = outside_of_bounds(poses)
+        if (
+            any(terminal) or steps == max_steps or outside
+        ):  # reset the environment if a terminal state was reached or max_steps were taken
+            poses, distance, cos, sin, collision, goal, a, reward, positions, goal_positions = sim.reset()
+            episode += 1
+            if episode % train_every_n == 0:
+                model.train(
+                    replay_buffer=replay_buffer,
+                    iterations=training_iterations,
+                    batch_size=batch_size,
+                )  # train the model and update its parameters
+
+            steps = 0
+        else:
+            steps += 1
+
+        if (
+            episode + 1
+        ) % episodes_per_epoch == 0:  # if the epoch is concluded, run evaluation
+            episode = 0
+            epoch += 1
+            # evaluate(model, epoch, sim, eval_episodes=nr_eval_episodes)
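+# A worked check of outside_of_bounds above (poses are world-frame [x, y, theta],
+# and the 12 x 12 world is centred on (6, 6), so the 10.5 margin only trips far
+# outside the walls):
+#   outside_of_bounds([[17.0, 6.0, 0.0]])   # |17 - 6| = 11.0  > 10.5 -> True
+#   outside_of_bounds([[11.0, 11.0, 0.0]])  # |11 - 6| =  5.0 <= 10.5 -> False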
Evaluating scenarios") + avg_reward = 0.0 + col = 0 + goals = 0 + for _ in range(eval_episodes): + count = 0 + poses, distance, cos, sin, collision, goal, a, reward, positions, goal_positions = sim.reset() + done = False + while not done and count < 501: + state, terminal = model.prepare_state( + poses, distance, cos, sin, collision, goal, a, positions, goal_positions + ) + action, connection, combined_weights = model.get_action(np.array(state), False) + a_in = [[(a[0] + 1) / 4, a[1]] for a in action] + poses, distance, cos, sin, collision, goal, a, reward, positions, goal_positions = sim.step(a_in, connection, combined_weights) + avg_reward += sum(reward)/len(reward) + count += 1 + if collision: + col += 1 + if goal: + goals += 1 + done = collision or goal + avg_reward /= eval_episodes + avg_col = col / eval_episodes + avg_goal = goals / eval_episodes + print(f"Average Reward: {avg_reward}") + print(f"Average Collision rate: {avg_col}") + print(f"Average Goal rate: {avg_goal}") + print("..............................................") + model.writer.add_scalar("eval/avg_reward", avg_reward, epoch) + model.writer.add_scalar("eval/avg_col", avg_col, epoch) + model.writer.add_scalar("eval/avg_goal", avg_goal, epoch) + + +if __name__ == "__main__": + main() diff --git a/robot_nav/replay_buffer.py b/robot_nav/replay_buffer.py index 8c8eeea..f5902d7 100644 --- a/robot_nav/replay_buffer.py +++ b/robot_nav/replay_buffer.py @@ -71,8 +71,8 @@ def sample_batch(self, batch_size): s_batch = np.array([_[0] for _ in batch]) a_batch = np.array([_[1] for _ in batch]) - r_batch = np.array([_[2] for _ in batch]).reshape(-1, 1) - t_batch = np.array([_[3] for _ in batch]).reshape(-1, 1) + r_batch = np.array([_[2] for _ in batch]) + t_batch = np.array([_[3] for _ in batch]) s2_batch = np.array([_[4] for _ in batch]) return s_batch, a_batch, r_batch, t_batch, s2_batch diff --git a/robot_nav/sim.py b/robot_nav/sim.py index adfe7f0..1fbc26f 100644 --- a/robot_nav/sim.py +++ b/robot_nav/sim.py @@ -2,8 +2,7 @@ import numpy as np import random -import shapely -from irsim.lib.handler.geometry_handler import GeometryFactory +import torch class SIM_ENV: @@ -32,8 +31,9 @@ def __init__(self, world_file="robot_world.yaml", disable_plotting=False): ) robot_info = self.env.get_robot_info(0) self.robot_goal = robot_info.goal + self.num_robots = len(self.env.robot_list) - def step(self, lin_velocity=0.0, ang_velocity=0.1): + def step(self, action, connection): """ Perform one step in the simulation using the given control commands. @@ -45,26 +45,71 @@ def step(self, lin_velocity=0.0, ang_velocity=0.1): (tuple): Contains the latest LIDAR scan, distance to goal, cosine and sine of angle to goal, collision flag, goal reached flag, applied action, and computed reward. 
""" - self.env.step(action_id=0, action=np.array([[lin_velocity], [ang_velocity]])) + # action = [[lin_velocity, ang_velocity], [lin_velocity, ang_velocity], [lin_velocity, ang_velocity], [lin_velocity, ang_velocity], [lin_velocity, ang_velocity]] + self.env.step(action_id=[i for i in range(self.num_robots)], action=action) self.env.render() - scan = self.env.get_lidar_scan() - latest_scan = scan["ranges"] - - robot_state = self.env.get_robot_state() - goal_vector = [ - self.robot_goal[0].item() - robot_state[0].item(), - self.robot_goal[1].item() - robot_state[1].item(), - ] - distance = np.linalg.norm(goal_vector) - goal = self.env.robot.arrive - pose_vector = [np.cos(robot_state[2]).item(), np.sin(robot_state[2]).item()] - cos, sin = self.cossin(pose_vector, goal_vector) - collision = self.env.robot.collision - action = [lin_velocity, ang_velocity] - reward = self.get_reward(goal, collision, action, latest_scan) - - return latest_scan, distance, cos, sin, collision, goal, action, reward + lasers = [] + distances = [] + coss = [] + sins = [] + collisions = [] + goals = [] + rewards = [] + positions = [] + for i in range(self.num_robots): + + + + + + + scan = self.env.get_lidar_scan(id=i) + latest_scan = scan["ranges"] + robot_state = self.env.robot_list[i].state + robot_goal = self.env.robot_list[i].goal + goal_vector = [ + robot_goal[0].item() - robot_state[0].item(), + robot_goal[1].item() - robot_state[1].item(), + ] + distance = np.linalg.norm(goal_vector) + goal = self.env.robot_list[i].arrive + pose_vector = [np.cos(robot_state[2]).item(), np.sin(robot_state[2]).item()] + cos, sin = self.cossin(pose_vector, goal_vector) + collision = self.env.robot_list[i].collision + action_i = action[i] + reward = self.get_reward(goal, collision, action_i, latest_scan) + + position = [robot_state[0].item(), robot_state[1].item()] + + lasers.append(latest_scan) + distances.append(distance) + coss.append(cos) + sins.append(sin) + collisions.append(collision) + goals.append(goal) + rewards.append(reward) + positions.append(position) + + i_connections = connection[0][i].tolist() + i_connections.insert(i, 0) + + for j in range(self.num_robots): + if i_connections[j] > 0.5: + other_robot_state = self.env.robot_list[j].state + other_pos = [other_robot_state[0].item(), other_robot_state[1].item()] + rx = [position[0], other_pos[0]] + ry = [position[1], other_pos[1]] + self.env.draw_trajectory(np.array([rx, ry]), refresh=True) + + if goal: + self.env.robot_list[i].set_random_goal( + obstacle_list=self.env.obstacle_list, + init=True, + range_limits=[[1, 1, -3.141592653589793], [11, 11, 3.141592653589793]], + ) + + return lasers, distances, coss, sins, collisions, goals, action, rewards, positions def reset( self, @@ -87,39 +132,41 @@ def reset( and reward-related flags and values. 
""" if robot_state is None: - robot_state = [[random.uniform(1, 9)], [random.uniform(1, 9)], [0]] + robot_state = [[random.uniform(1, 11)], [random.uniform(1, 11)], [0]] - self.env.robot.set_state( - state=np.array(robot_state), - init=True, - ) + for robot in self.env.robot_list: + robot_state = [[random.uniform(1, 11)], [random.uniform(1, 11)], [0]] + robot.set_state( + state=np.array(robot_state), + init=True, + ) if random_obstacles: if random_obstacle_ids is None: - random_obstacle_ids = [i + 1 for i in range(7)] + random_obstacle_ids = [i + self.num_robots for i in range(7)] self.env.random_obstacle_position( range_low=[0, 0, -3.14], - range_high=[10, 10, 3.14], + range_high=[12, 12, 3.14], ids=random_obstacle_ids, non_overlapping=True, ) - if robot_goal is None: - self.env.robot.set_random_goal( - obstacle_list=self.env.obstacle_list, - init=True, - range_limits=[[1, 1, -3.141592653589793], [9, 9, 3.141592653589793]], - ) - else: - self.env.robot.set_goal(np.array(robot_goal), init=True) + for robot in self.env.robot_list: + if robot_goal is None: + robot.set_random_goal( + obstacle_list=self.env.obstacle_list, + init=True, + range_limits=[[1, 1, -3.141592653589793], [11, 11, 3.141592653589793]], + ) + else: + self.env.robot.set_goal(np.array(robot_goal), init=True) self.env.reset() self.robot_goal = self.env.robot.goal - action = [0.0, 0.0] - latest_scan, distance, cos, sin, _, _, action, reward = self.step( - lin_velocity=action[0], ang_velocity=action[1] - ) - return latest_scan, distance, cos, sin, False, False, action, reward + action = [[0.0, 0.0] for _ in range(self.num_robots)] + con = torch.tensor([[[0, 0], [0, 0], [0, 0]]]) + latest_scan, distance, cos, sin, _, _, action, reward, positions = self.step(action, con) + return latest_scan, distance, cos, sin, [False]*self.num_robots, [False]*self.num_robots, action, reward, positions @staticmethod def cossin(vec1, vec2): diff --git a/robot_nav/sim2.py b/robot_nav/sim2.py new file mode 100644 index 0000000..1bfcda5 --- /dev/null +++ b/robot_nav/sim2.py @@ -0,0 +1,254 @@ +import irsim +import numpy as np +import random + +import torch + + +class SIM_ENV: + """ + A simulation environment interface for robot navigation using IRSim. + + This class wraps around the IRSim environment and provides methods for stepping, + resetting, and interacting with a mobile robot, including reward computation. + + Attributes: + env (object): The simulation environment instance from IRSim. + robot_goal (np.ndarray): The goal position of the robot. + """ + + def __init__(self, world_file="robot_world.yaml", disable_plotting=False): + """ + Initialize the simulation environment. + + Args: + world_file (str): Path to the world configuration YAML file. + disable_plotting (bool): If True, disables rendering and plotting. + """ + display = False if disable_plotting else True + self.env = irsim.make( + world_file, disable_all_plot=disable_plotting, display=display + ) + robot_info = self.env.get_robot_info(0) + self.robot_goal = robot_info.goal + self.num_robots = len(self.env.robot_list) + + def step(self, action, connection, combined_weights = None): + """ + Perform one step in the simulation using the given control commands. + + Args: + lin_velocity (float): Linear velocity to apply to the robot. + ang_velocity (float): Angular velocity to apply to the robot. + + Returns: + (tuple): Contains the latest LIDAR scan, distance to goal, cosine and sine of angle to goal, + collision flag, goal reached flag, applied action, and computed reward. 
+        """
+        # action = [[lin_velocity, ang_velocity], ...] -- one pair per robot
+        self.env.step(action_id=[i for i in range(self.num_robots)], action=action)
+        self.env.render()
+
+        poses = []
+        distances = []
+        coss = []
+        sins = []
+        collisions = []
+        goals = []
+        rewards = []
+        positions = []
+        goal_positions = []
+        robot_states = [[self.env.robot_list[i].state[0], self.env.robot_list[i].state[1]] for i in range(self.num_robots)]
+        for i in range(self.num_robots):
+            robot_state = self.env.robot_list[i].state
+            closest_robots = [np.linalg.norm([robot_states[j][0] - robot_state[0], robot_states[j][1] - robot_state[1]])
+                              for j in range(self.num_robots) if j != i]
+            robot_goal = self.env.robot_list[i].goal
+            goal_vector = [
+                robot_goal[0].item() - robot_state[0].item(),
+                robot_goal[1].item() - robot_state[1].item(),
+            ]
+            distance = np.linalg.norm(goal_vector)
+            goal = self.env.robot_list[i].arrive
+            pose_vector = [np.cos(robot_state[2]).item(), np.sin(robot_state[2]).item()]
+            cos, sin = self.cossin(pose_vector, goal_vector)
+            collision = self.env.robot_list[i].collision
+            action_i = action[i]
+            reward = self.get_reward(goal, collision, action_i, closest_robots, distance)
+
+            position = [robot_state[0].item(), robot_state[1].item()]
+            goal_position = [robot_goal[0].item(), robot_goal[1].item()]
+
+            distances.append(distance)
+            coss.append(cos)
+            sins.append(sin)
+            collisions.append(collision)
+            goals.append(goal)
+            rewards.append(reward)
+            positions.append(position)
+            poses.append([robot_state[0].item(), robot_state[1].item(), robot_state[2].item()])
+            goal_positions.append(goal_position)
+
+            # gumbel_sample = torch.nn.functional.gumbel_softmax(connection[i], tau=0.5, dim=-1)
+            # i_connections = gumbel_sample.tolist()
+            # i_connections.insert(i, 0)
+
+            i_probs = torch.sigmoid(connection[i])  # connection[i] holds the "connect" logits toward each other robot
+
+            # insert the self-connection so indices line up with robot ids (always zero: no self-link)
+            i_connections = i_probs.tolist()
+            i_connections.insert(i, 0)
+            if combined_weights is not None:
+                i_weights = combined_weights[i].tolist()
+                i_weights.insert(i, 0)
+
+            for j in range(self.num_robots):
+                if i_connections[j] > 0.5:
+                    if combined_weights is not None:
+                        weight = i_weights[j]
+                    else:
+                        weight = 1
+                    other_robot_state = self.env.robot_list[j].state
+                    other_pos = [other_robot_state[0].item(), other_robot_state[1].item()]
+                    rx = [position[0], other_pos[0]]
+                    ry = [position[1], other_pos[1]]
+                    self.env.draw_trajectory(np.array([rx, ry]), refresh=True, linewidth=weight)
+
+            if goal:
+                self.env.robot_list[i].set_random_goal(
+                    obstacle_list=self.env.obstacle_list,
+                    init=True,
+                    # range_limits=[[self.env.robot_list[i].position[0].item()-3, self.env.robot_list[i].position[1].item()-3, -3.141592653589793], [self.env.robot_list[i].position[0].item()+3, self.env.robot_list[i].position[1].item()+3, 3.141592653589793]],
+                    range_limits=[[1, 1, -3.141592653589793],
+                                  [11, 11, 3.141592653589793]],
+                )
+
+        return poses, distances, coss, sins, collisions, goals, action, rewards, positions, goal_positions
+
+    def reset(
+        self,
+        robot_state=None,
+        robot_goal=None,
+        random_obstacles=False,
+        random_obstacle_ids=None,
+    ):
+        """
+        Reset the simulation environment, optionally setting robot and obstacle states.
+
+        Args:
+            robot_state (list or None): Initial robot state as [x, y, theta]; each robot is
+                re-sampled to a random, non-overlapping start pose.
+            robot_goal (list or None): Goal state for the robot.
+            random_obstacles (bool): Whether to randomly reposition obstacles.
+            random_obstacle_ids (list or None): Specific obstacle IDs to randomize.
+
+        Returns:
+            (tuple): Initial observation after reset, including LIDAR scan, distance, cos/sin,
+                     and reward-related flags and values.
+        """
+        if robot_state is None:
+            robot_state = [[random.uniform(3, 9)], [random.uniform(3, 9)], [0]]
+
+        init_states = []
+        for robot in self.env.robot_list:
+            conflict = True
+            while conflict:
+                conflict = False
+                robot_state = [[random.uniform(3, 9)], [random.uniform(3, 9)], [random.uniform(-3.14, 3.14)]]
+                pos = [robot_state[0][0], robot_state[1][0]]
+                for loc in init_states:
+                    vector = [
+                        pos[0] - loc[0],
+                        pos[1] - loc[1],
+                    ]
+                    if np.linalg.norm(vector) < 0.6:
+                        conflict = True
+            # record only the accepted, conflict-free position
+            init_states.append(pos)
+
+            robot.set_state(
+                state=np.array(robot_state),
+                init=True,
+            )
+
+        if random_obstacles:
+            if random_obstacle_ids is None:
+                random_obstacle_ids = [i + self.num_robots for i in range(7)]
+            self.env.random_obstacle_position(
+                range_low=[0, 0, -3.14],
+                range_high=[12, 12, 3.14],
+                ids=random_obstacle_ids,
+                non_overlapping=True,
+            )
+
+        for robot in self.env.robot_list:
+            if robot_goal is None:
+                robot.set_random_goal(
+                    obstacle_list=self.env.obstacle_list,
+                    init=True,
+                    # range_limits=[[robot.position[0].item()-3, robot.position[1].item()-3, -3.141592653589793], [robot.position[0].item()+3, robot.position[1].item()+3, 3.141592653589793]],
+                    range_limits=[[1, 1, -3.141592653589793],
+                                  [11, 11, 3.141592653589793]],
+                )
+            else:
+                robot.set_goal(np.array(robot_goal), init=True)
+        self.env.reset()
+        self.robot_goal = self.env.robot.goal
+
+        action = [[0.0, 0.0] for _ in range(self.num_robots)]
+        # no connections at reset; step() expects logits of shape (n_robots, n_robots - 1)
+        con = torch.zeros((self.num_robots, self.num_robots - 1))
+        poses, distance, cos, sin, _, _, action, reward, positions, goal_positions = self.step(action, con)
+        return poses, distance, cos, sin, [False]*self.num_robots, [False]*self.num_robots, action, reward, positions, goal_positions
+
+    @staticmethod
+    def cossin(vec1, vec2):
+        """
+        Compute the cosine and sine of the angle between two 2D vectors.
+
+        Args:
+            vec1 (list): First 2D vector.
+            vec2 (list): Second 2D vector.
+
+        Returns:
+            (tuple): (cosine, sine) of the angle between the vectors.
+        """
+        vec1 = vec1 / np.linalg.norm(vec1)
+        vec2 = vec2 / np.linalg.norm(vec2)
+        cos = np.dot(vec1, vec2)
+        sin = vec1[0] * vec2[1] - vec1[1] * vec2[0]
+        return cos, sin
+
+    @staticmethod
+    def get_reward(goal, collision, action, closest_robots, distance):
+        """
+        Calculate the reward for the current step.
+
+        Args:
+            goal (bool): Whether the goal has been reached.
+            collision (bool): Whether a collision occurred.
+            action (list): The action taken [linear velocity, angular velocity].
+            closest_robots (list): Distances from this robot to every other robot.
+            distance (float): Current distance to the goal.
+
+        Returns:
+            (float): Computed reward for the current state.
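+
+        Example (illustrative values): goal=False, collision=False, action=[0.5, 0.1],
+            closest_robots=[2.0], distance=4.0 gives
+            2*0.5 - abs(0.1) - (2.5 - 2.0) + 0.1/4.0 = 0.425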
+ """ + # if goal: + # return 100.0 + # elif collision: + # return -100.0 + # else: + # r_dist = 0.75/distance + # cl_robot = min(closest_robots) + # cl_pen = 0 - cl_robot if cl_robot < 0 else 0 + # return 2*action[0] - abs(action[1]) - cl_pen + r_dist + + if goal: + return 20.0 + elif collision: + return -100.0 + else: + r_dist = 0.1/distance + cl_robot = min(closest_robots) + cl_pen = 2.5 - cl_robot if cl_robot < 2.5 else 0 + return 2*action[0] - abs(action[1]) - cl_pen + r_dist diff --git a/robot_nav/train.py b/robot_nav/train.py index 936ecc7..403cdb0 100644 --- a/robot_nav/train.py +++ b/robot_nav/train.py @@ -15,7 +15,7 @@ def main(args=None): """Main training function""" action_dim = 2 # number of actions produced by the model max_action = 1 # maximum absolute value of output actions - state_dim = 25 # number of input values in the neural network (vector length of state input) + state_dim = 95 # number of input values in the neural network (vector length of state input) device = torch.device( "cuda" if torch.cuda.is_available() else "cpu" ) # using cuda if it is available, cpu otherwise @@ -36,17 +36,17 @@ def main(args=None): ) save_every = 5 # save the model every n training cycles - model = TD3( + model = CNNTD3( state_dim=state_dim, action_dim=action_dim, max_action=max_action, device=device, save_every=save_every, load_model=False, - model_name="TD3", + model_name="CNNTD3", ) # instantiate a model - sim = SIM_ENV(disable_plotting=False) # instantiate environment + sim = SIM_ENV(world_file="multi_robot_world.yaml",disable_plotting=False) # instantiate environment replay_buffer = get_buffer( model, sim, From 24101f3bd19b316552e494bc548b51ecdf5f7c10 Mon Sep 17 00:00:00 2001 From: reinis Date: Fri, 20 Jun 2025 13:45:50 +0200 Subject: [PATCH 02/12] poetry update --- poetry.lock | 1077 ++++++++++++++++++-------------- pyproject.toml | 2 +- robot_nav/models/CNNTD3/att.py | 4 +- robot_nav/multi_train2.py | 29 +- robot_nav/sim2.py | 43 +- 5 files changed, 672 insertions(+), 483 deletions(-) diff --git a/poetry.lock b/poetry.lock index 80d433b..cd87a2d 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2,14 +2,14 @@ [[package]] name = "absl-py" -version = "2.2.2" +version = "2.3.0" description = "Abseil Python Common Libraries, see https://github.com/abseil/abseil-py." optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "absl_py-2.2.2-py3-none-any.whl", hash = "sha256:e5797bc6abe45f64fd95dc06394ca3f2bedf3b5d895e9da691c9ee3397d70092"}, - {file = "absl_py-2.2.2.tar.gz", hash = "sha256:bf25b2c2eed013ca456918c453d687eab4e8309fba81ee2f4c1a6aa2494175eb"}, + {file = "absl_py-2.3.0-py3-none-any.whl", hash = "sha256:9824a48b654a306168f63e0d97714665f8490b8d89ec7bf2efc24bf67cf579b3"}, + {file = "absl_py-2.3.0.tar.gz", hash = "sha256:d96fda5c884f1b22178852f30ffa85766d50b99e00775ea626c23304f582fc4f"}, ] [[package]] @@ -48,14 +48,14 @@ extras = ["regex"] [[package]] name = "certifi" -version = "2025.4.26" +version = "2025.6.15" description = "Python package for providing Mozilla's CA Bundle." 
optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" groups = ["main"] files = [ - {file = "certifi-2025.4.26-py3-none-any.whl", hash = "sha256:30350364dfe371162649852c63336a15c70c6510c2ad5015b21c2345311805f3"}, - {file = "certifi-2025.4.26.tar.gz", hash = "sha256:0a816057ea3cdefcef70270d2c515e4506bbc954f417fa5ade2021213bb8f0c6"}, + {file = "certifi-2025.6.15-py3-none-any.whl", hash = "sha256:2e0c7ce7cb5d8f8634ca55d2ba7e6ec2689a2fd6537d8dec1296a477a4910057"}, + {file = "certifi-2025.6.15.tar.gz", hash = "sha256:d747aa5a8b9bbbb1bb8c22bb13e22bd1f18e9796defa16bab421f7f7a317323b"}, ] [[package]] @@ -162,14 +162,14 @@ files = [ [[package]] name = "click" -version = "8.1.8" +version = "8.2.1" description = "Composable command line interface toolkit" optional = false -python-versions = ">=3.7" +python-versions = ">=3.10" groups = ["main"] files = [ - {file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"}, - {file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"}, + {file = "click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b"}, + {file = "click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202"}, ] [package.dependencies] @@ -264,6 +264,89 @@ mypy = ["bokeh", "contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.15.0)", " test = ["Pillow", "contourpy[test-no-images]", "matplotlib"] test-no-images = ["pytest", "pytest-cov", "pytest-rerunfailures", "pytest-xdist", "wurlitzer"] +[[package]] +name = "coverage" +version = "7.9.1" +description = "Code coverage measurement for Python" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "coverage-7.9.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cc94d7c5e8423920787c33d811c0be67b7be83c705f001f7180c7b186dcf10ca"}, + {file = "coverage-7.9.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:16aa0830d0c08a2c40c264cef801db8bc4fc0e1892782e45bcacbd5889270509"}, + {file = "coverage-7.9.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf95981b126f23db63e9dbe4cf65bd71f9a6305696fa5e2262693bc4e2183f5b"}, + {file = "coverage-7.9.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f05031cf21699785cd47cb7485f67df619e7bcdae38e0fde40d23d3d0210d3c3"}, + {file = "coverage-7.9.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb4fbcab8764dc072cb651a4bcda4d11fb5658a1d8d68842a862a6610bd8cfa3"}, + {file = "coverage-7.9.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:0f16649a7330ec307942ed27d06ee7e7a38417144620bb3d6e9a18ded8a2d3e5"}, + {file = "coverage-7.9.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:cea0a27a89e6432705fffc178064503508e3c0184b4f061700e771a09de58187"}, + {file = "coverage-7.9.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e980b53a959fa53b6f05343afbd1e6f44a23ed6c23c4b4c56c6662bbb40c82ce"}, + {file = "coverage-7.9.1-cp310-cp310-win32.whl", hash = "sha256:70760b4c5560be6ca70d11f8988ee6542b003f982b32f83d5ac0b72476607b70"}, + {file = "coverage-7.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:a66e8f628b71f78c0e0342003d53b53101ba4e00ea8dabb799d9dba0abbbcebe"}, + {file = "coverage-7.9.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:95c765060e65c692da2d2f51a9499c5e9f5cf5453aeaf1420e3fc847cc060582"}, + {file 
= "coverage-7.9.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ba383dc6afd5ec5b7a0d0c23d38895db0e15bcba7fb0fa8901f245267ac30d86"}, + {file = "coverage-7.9.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37ae0383f13cbdcf1e5e7014489b0d71cc0106458878ccde52e8a12ced4298ed"}, + {file = "coverage-7.9.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:69aa417a030bf11ec46149636314c24c8d60fadb12fc0ee8f10fda0d918c879d"}, + {file = "coverage-7.9.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a4be2a28656afe279b34d4f91c3e26eccf2f85500d4a4ff0b1f8b54bf807338"}, + {file = "coverage-7.9.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:382e7ddd5289f140259b610e5f5c58f713d025cb2f66d0eb17e68d0a94278875"}, + {file = "coverage-7.9.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e5532482344186c543c37bfad0ee6069e8ae4fc38d073b8bc836fc8f03c9e250"}, + {file = "coverage-7.9.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a39d18b3f50cc121d0ce3838d32d58bd1d15dab89c910358ebefc3665712256c"}, + {file = "coverage-7.9.1-cp311-cp311-win32.whl", hash = "sha256:dd24bd8d77c98557880def750782df77ab2b6885a18483dc8588792247174b32"}, + {file = "coverage-7.9.1-cp311-cp311-win_amd64.whl", hash = "sha256:6b55ad10a35a21b8015eabddc9ba31eb590f54adc9cd39bcf09ff5349fd52125"}, + {file = "coverage-7.9.1-cp311-cp311-win_arm64.whl", hash = "sha256:6ad935f0016be24c0e97fc8c40c465f9c4b85cbbe6eac48934c0dc4d2568321e"}, + {file = "coverage-7.9.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a8de12b4b87c20de895f10567639c0797b621b22897b0af3ce4b4e204a743626"}, + {file = "coverage-7.9.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5add197315a054e92cee1b5f686a2bcba60c4c3e66ee3de77ace6c867bdee7cb"}, + {file = "coverage-7.9.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:600a1d4106fe66f41e5d0136dfbc68fe7200a5cbe85610ddf094f8f22e1b0300"}, + {file = "coverage-7.9.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a876e4c3e5a2a1715a6608906aa5a2e0475b9c0f68343c2ada98110512ab1d8"}, + {file = "coverage-7.9.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81f34346dd63010453922c8e628a52ea2d2ccd73cb2487f7700ac531b247c8a5"}, + {file = "coverage-7.9.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:888f8eee13f2377ce86d44f338968eedec3291876b0b8a7289247ba52cb984cd"}, + {file = "coverage-7.9.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9969ef1e69b8c8e1e70d591f91bbc37fc9a3621e447525d1602801a24ceda898"}, + {file = "coverage-7.9.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:60c458224331ee3f1a5b472773e4a085cc27a86a0b48205409d364272d67140d"}, + {file = "coverage-7.9.1-cp312-cp312-win32.whl", hash = "sha256:5f646a99a8c2b3ff4c6a6e081f78fad0dde275cd59f8f49dc4eab2e394332e74"}, + {file = "coverage-7.9.1-cp312-cp312-win_amd64.whl", hash = "sha256:30f445f85c353090b83e552dcbbdad3ec84c7967e108c3ae54556ca69955563e"}, + {file = "coverage-7.9.1-cp312-cp312-win_arm64.whl", hash = "sha256:af41da5dca398d3474129c58cb2b106a5d93bbb196be0d307ac82311ca234342"}, + {file = "coverage-7.9.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:31324f18d5969feef7344a932c32428a2d1a3e50b15a6404e97cba1cc9b2c631"}, + {file = "coverage-7.9.1-cp313-cp313-macosx_11_0_arm64.whl", hash = 
"sha256:0c804506d624e8a20fb3108764c52e0eef664e29d21692afa375e0dd98dc384f"}, + {file = "coverage-7.9.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef64c27bc40189f36fcc50c3fb8f16ccda73b6a0b80d9bd6e6ce4cffcd810bbd"}, + {file = "coverage-7.9.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d4fe2348cc6ec372e25adec0219ee2334a68d2f5222e0cba9c0d613394e12d86"}, + {file = "coverage-7.9.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:34ed2186fe52fcc24d4561041979a0dec69adae7bce2ae8d1c49eace13e55c43"}, + {file = "coverage-7.9.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:25308bd3d00d5eedd5ae7d4357161f4df743e3c0240fa773ee1b0f75e6c7c0f1"}, + {file = "coverage-7.9.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:73e9439310f65d55a5a1e0564b48e34f5369bee943d72c88378f2d576f5a5751"}, + {file = "coverage-7.9.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:37ab6be0859141b53aa89412a82454b482c81cf750de4f29223d52268a86de67"}, + {file = "coverage-7.9.1-cp313-cp313-win32.whl", hash = "sha256:64bdd969456e2d02a8b08aa047a92d269c7ac1f47e0c977675d550c9a0863643"}, + {file = "coverage-7.9.1-cp313-cp313-win_amd64.whl", hash = "sha256:be9e3f68ca9edb897c2184ad0eee815c635565dbe7a0e7e814dc1f7cbab92c0a"}, + {file = "coverage-7.9.1-cp313-cp313-win_arm64.whl", hash = "sha256:1c503289ffef1d5105d91bbb4d62cbe4b14bec4d13ca225f9c73cde9bb46207d"}, + {file = "coverage-7.9.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0b3496922cb5f4215bf5caaef4cf12364a26b0be82e9ed6d050f3352cf2d7ef0"}, + {file = "coverage-7.9.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:9565c3ab1c93310569ec0d86b017f128f027cab0b622b7af288696d7ed43a16d"}, + {file = "coverage-7.9.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2241ad5dbf79ae1d9c08fe52b36d03ca122fb9ac6bca0f34439e99f8327ac89f"}, + {file = "coverage-7.9.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3bb5838701ca68b10ebc0937dbd0eb81974bac54447c55cd58dea5bca8451029"}, + {file = "coverage-7.9.1-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b30a25f814591a8c0c5372c11ac8967f669b97444c47fd794926e175c4047ece"}, + {file = "coverage-7.9.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:2d04b16a6062516df97969f1ae7efd0de9c31eb6ebdceaa0d213b21c0ca1a683"}, + {file = "coverage-7.9.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:7931b9e249edefb07cd6ae10c702788546341d5fe44db5b6108a25da4dca513f"}, + {file = "coverage-7.9.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:52e92b01041151bf607ee858e5a56c62d4b70f4dac85b8c8cb7fb8a351ab2c10"}, + {file = "coverage-7.9.1-cp313-cp313t-win32.whl", hash = "sha256:684e2110ed84fd1ca5f40e89aa44adf1729dc85444004111aa01866507adf363"}, + {file = "coverage-7.9.1-cp313-cp313t-win_amd64.whl", hash = "sha256:437c576979e4db840539674e68c84b3cda82bc824dd138d56bead1435f1cb5d7"}, + {file = "coverage-7.9.1-cp313-cp313t-win_arm64.whl", hash = "sha256:18a0912944d70aaf5f399e350445738a1a20b50fbea788f640751c2ed9208b6c"}, + {file = "coverage-7.9.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6f424507f57878e424d9a95dc4ead3fbdd72fd201e404e861e465f28ea469951"}, + {file = "coverage-7.9.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:535fde4001b2783ac80865d90e7cc7798b6b126f4cd8a8c54acfe76804e54e58"}, + {file = 
"coverage-7.9.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02532fd3290bb8fa6bec876520842428e2a6ed6c27014eca81b031c2d30e3f71"}, + {file = "coverage-7.9.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:56f5eb308b17bca3bbff810f55ee26d51926d9f89ba92707ee41d3c061257e55"}, + {file = "coverage-7.9.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfa447506c1a52271f1b0de3f42ea0fa14676052549095e378d5bff1c505ff7b"}, + {file = "coverage-7.9.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:9ca8e220006966b4a7b68e8984a6aee645a0384b0769e829ba60281fe61ec4f7"}, + {file = "coverage-7.9.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:49f1d0788ba5b7ba65933f3a18864117c6506619f5ca80326b478f72acf3f385"}, + {file = "coverage-7.9.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:68cd53aec6f45b8e4724c0950ce86eacb775c6be01ce6e3669fe4f3a21e768ed"}, + {file = "coverage-7.9.1-cp39-cp39-win32.whl", hash = "sha256:95335095b6c7b1cc14c3f3f17d5452ce677e8490d101698562b2ffcacc304c8d"}, + {file = "coverage-7.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:e1b5191d1648acc439b24721caab2fd0c86679d8549ed2c84d5a7ec1bedcc244"}, + {file = "coverage-7.9.1-pp39.pp310.pp311-none-any.whl", hash = "sha256:db0f04118d1db74db6c9e1cb1898532c7dcc220f1d2718f058601f7c3f499514"}, + {file = "coverage-7.9.1-py3-none-any.whl", hash = "sha256:66b974b145aa189516b6bf2d8423e888b742517d37872f6ee4c5be0073bd9a3c"}, + {file = "coverage-7.9.1.tar.gz", hash = "sha256:6cf43c78c4282708a28e466316935ec7489a9c487518a77fa68f716c67909cec"}, +] + +[package.dependencies] +tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} + +[package.extras] +toml = ["tomli"] + [[package]] name = "cycler" version = "0.12.1" @@ -294,17 +377,20 @@ files = [ [[package]] name = "exceptiongroup" -version = "1.2.2" +version = "1.3.0" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" groups = ["main"] markers = "python_version < \"3.11\"" files = [ - {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, - {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, + {file = "exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10"}, + {file = "exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88"}, ] +[package.dependencies] +typing-extensions = {version = ">=4.6.0", markers = "python_version < \"3.13\""} + [package.extras] test = ["pytest (>=6)"] @@ -327,62 +413,54 @@ typing = ["typing-extensions (>=4.12.2)"] [[package]] name = "fonttools" -version = "4.57.0" +version = "4.58.4" description = "Tools to manipulate font files" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" groups = ["main"] files = [ - {file = "fonttools-4.57.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:babe8d1eb059a53e560e7bf29f8e8f4accc8b6cfb9b5fd10e485bde77e71ef41"}, - {file = "fonttools-4.57.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:81aa97669cd726349eb7bd43ca540cf418b279ee3caba5e2e295fb4e8f841c02"}, - {file = "fonttools-4.57.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:f0e9618630edd1910ad4f07f60d77c184b2f572c8ee43305ea3265675cbbfe7e"}, - {file = "fonttools-4.57.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:34687a5d21f1d688d7d8d416cb4c5b9c87fca8a1797ec0d74b9fdebfa55c09ab"}, - {file = "fonttools-4.57.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:69ab81b66ebaa8d430ba56c7a5f9abe0183afefd3a2d6e483060343398b13fb1"}, - {file = "fonttools-4.57.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:d639397de852f2ccfb3134b152c741406752640a266d9c1365b0f23d7b88077f"}, - {file = "fonttools-4.57.0-cp310-cp310-win32.whl", hash = "sha256:cc066cb98b912f525ae901a24cd381a656f024f76203bc85f78fcc9e66ae5aec"}, - {file = "fonttools-4.57.0-cp310-cp310-win_amd64.whl", hash = "sha256:7a64edd3ff6a7f711a15bd70b4458611fb240176ec11ad8845ccbab4fe6745db"}, - {file = "fonttools-4.57.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3871349303bdec958360eedb619169a779956503ffb4543bb3e6211e09b647c4"}, - {file = "fonttools-4.57.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c59375e85126b15a90fcba3443eaac58f3073ba091f02410eaa286da9ad80ed8"}, - {file = "fonttools-4.57.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:967b65232e104f4b0f6370a62eb33089e00024f2ce143aecbf9755649421c683"}, - {file = "fonttools-4.57.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39acf68abdfc74e19de7485f8f7396fa4d2418efea239b7061d6ed6a2510c746"}, - {file = "fonttools-4.57.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9d077f909f2343daf4495ba22bb0e23b62886e8ec7c109ee8234bdbd678cf344"}, - {file = "fonttools-4.57.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:46370ac47a1e91895d40e9ad48effbe8e9d9db1a4b80888095bc00e7beaa042f"}, - {file = "fonttools-4.57.0-cp311-cp311-win32.whl", hash = "sha256:ca2aed95855506b7ae94e8f1f6217b7673c929e4f4f1217bcaa236253055cb36"}, - {file = "fonttools-4.57.0-cp311-cp311-win_amd64.whl", hash = "sha256:17168a4670bbe3775f3f3f72d23ee786bd965395381dfbb70111e25e81505b9d"}, - {file = "fonttools-4.57.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:889e45e976c74abc7256d3064aa7c1295aa283c6bb19810b9f8b604dfe5c7f31"}, - {file = "fonttools-4.57.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:0425c2e052a5f1516c94e5855dbda706ae5a768631e9fcc34e57d074d1b65b92"}, - {file = "fonttools-4.57.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44c26a311be2ac130f40a96769264809d3b0cb297518669db437d1cc82974888"}, - {file = "fonttools-4.57.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84c41ba992df5b8d680b89fd84c6a1f2aca2b9f1ae8a67400c8930cd4ea115f6"}, - {file = "fonttools-4.57.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ea1e9e43ca56b0c12440a7c689b1350066595bebcaa83baad05b8b2675129d98"}, - {file = "fonttools-4.57.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:84fd56c78d431606332a0627c16e2a63d243d0d8b05521257d77c6529abe14d8"}, - {file = "fonttools-4.57.0-cp312-cp312-win32.whl", hash = "sha256:f4376819c1c778d59e0a31db5dc6ede854e9edf28bbfa5b756604727f7f800ac"}, - {file = "fonttools-4.57.0-cp312-cp312-win_amd64.whl", hash = "sha256:57e30241524879ea10cdf79c737037221f77cc126a8cdc8ff2c94d4a522504b9"}, - {file = "fonttools-4.57.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:408ce299696012d503b714778d89aa476f032414ae57e57b42e4b92363e0b8ef"}, - {file = "fonttools-4.57.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = 
"sha256:bbceffc80aa02d9e8b99f2a7491ed8c4a783b2fc4020119dc405ca14fb5c758c"}, - {file = "fonttools-4.57.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f022601f3ee9e1f6658ed6d184ce27fa5216cee5b82d279e0f0bde5deebece72"}, - {file = "fonttools-4.57.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4dea5893b58d4637ffa925536462ba626f8a1b9ffbe2f5c272cdf2c6ebadb817"}, - {file = "fonttools-4.57.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:dff02c5c8423a657c550b48231d0a48d7e2b2e131088e55983cfe74ccc2c7cc9"}, - {file = "fonttools-4.57.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:767604f244dc17c68d3e2dbf98e038d11a18abc078f2d0f84b6c24571d9c0b13"}, - {file = "fonttools-4.57.0-cp313-cp313-win32.whl", hash = "sha256:8e2e12d0d862f43d51e5afb8b9751c77e6bec7d2dc00aad80641364e9df5b199"}, - {file = "fonttools-4.57.0-cp313-cp313-win_amd64.whl", hash = "sha256:f1d6bc9c23356908db712d282acb3eebd4ae5ec6d8b696aa40342b1d84f8e9e3"}, - {file = "fonttools-4.57.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:9d57b4e23ebbe985125d3f0cabbf286efa191ab60bbadb9326091050d88e8213"}, - {file = "fonttools-4.57.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:579ba873d7f2a96f78b2e11028f7472146ae181cae0e4d814a37a09e93d5c5cc"}, - {file = "fonttools-4.57.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6e3e1ec10c29bae0ea826b61f265ec5c858c5ba2ce2e69a71a62f285cf8e4595"}, - {file = "fonttools-4.57.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1968f2a2003c97c4ce6308dc2498d5fd4364ad309900930aa5a503c9851aec8"}, - {file = "fonttools-4.57.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:aff40f8ac6763d05c2c8f6d240c6dac4bb92640a86d9b0c3f3fff4404f34095c"}, - {file = "fonttools-4.57.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:d07f1b64008e39fceae7aa99e38df8385d7d24a474a8c9872645c4397b674481"}, - {file = "fonttools-4.57.0-cp38-cp38-win32.whl", hash = "sha256:51d8482e96b28fb28aa8e50b5706f3cee06de85cbe2dce80dbd1917ae22ec5a6"}, - {file = "fonttools-4.57.0-cp38-cp38-win_amd64.whl", hash = "sha256:03290e818782e7edb159474144fca11e36a8ed6663d1fcbd5268eb550594fd8e"}, - {file = "fonttools-4.57.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7339e6a3283e4b0ade99cade51e97cde3d54cd6d1c3744459e886b66d630c8b3"}, - {file = "fonttools-4.57.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:05efceb2cb5f6ec92a4180fcb7a64aa8d3385fd49cfbbe459350229d1974f0b1"}, - {file = "fonttools-4.57.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a97bb05eb24637714a04dee85bdf0ad1941df64fe3b802ee4ac1c284a5f97b7c"}, - {file = "fonttools-4.57.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:541cb48191a19ceb1a2a4b90c1fcebd22a1ff7491010d3cf840dd3a68aebd654"}, - {file = "fonttools-4.57.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:cdef9a056c222d0479a1fdb721430f9efd68268014c54e8166133d2643cb05d9"}, - {file = "fonttools-4.57.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:3cf97236b192a50a4bf200dc5ba405aa78d4f537a2c6e4c624bb60466d5b03bd"}, - {file = "fonttools-4.57.0-cp39-cp39-win32.whl", hash = "sha256:e952c684274a7714b3160f57ec1d78309f955c6335c04433f07d36c5eb27b1f9"}, - {file = "fonttools-4.57.0-cp39-cp39-win_amd64.whl", hash = "sha256:a2a722c0e4bfd9966a11ff55c895c817158fcce1b2b6700205a376403b546ad9"}, - {file = "fonttools-4.57.0-py3-none-any.whl", hash = 
"sha256:3122c604a675513c68bd24c6a8f9091f1c2376d18e8f5fe5a101746c81b3e98f"}, - {file = "fonttools-4.57.0.tar.gz", hash = "sha256:727ece10e065be2f9dd239d15dd5d60a66e17eac11aea47d447f9f03fdbc42de"}, + {file = "fonttools-4.58.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:834542f13fee7625ad753b2db035edb674b07522fcbdd0ed9e9a9e2a1034467f"}, + {file = "fonttools-4.58.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2e6c61ce330142525296170cd65666e46121fc0d44383cbbcfa39cf8f58383df"}, + {file = "fonttools-4.58.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e9c75f8faa29579c0fbf29b56ae6a3660c6c025f3b671803cb6a9caa7e4e3a98"}, + {file = "fonttools-4.58.4-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:88dedcedbd5549e35b2ea3db3de02579c27e62e51af56779c021e7b33caadd0e"}, + {file = "fonttools-4.58.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ae80a895adab43586f4da1521d58fd4f4377cef322ee0cc205abcefa3a5effc3"}, + {file = "fonttools-4.58.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:0d3acc7f0d151da116e87a182aefb569cf0a3c8e0fd4c9cd0a7c1e7d3e7adb26"}, + {file = "fonttools-4.58.4-cp310-cp310-win32.whl", hash = "sha256:1244f69686008e7e8d2581d9f37eef330a73fee3843f1107993eb82c9d306577"}, + {file = "fonttools-4.58.4-cp310-cp310-win_amd64.whl", hash = "sha256:2a66c0af8a01eb2b78645af60f3b787de5fe5eb1fd8348163715b80bdbfbde1f"}, + {file = "fonttools-4.58.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a3841991c9ee2dc0562eb7f23d333d34ce81e8e27c903846f0487da21e0028eb"}, + {file = "fonttools-4.58.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3c98f91b6a9604e7ffb5ece6ea346fa617f967c2c0944228801246ed56084664"}, + {file = "fonttools-4.58.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ab9f891eb687ddf6a4e5f82901e00f992e18012ca97ab7acd15f13632acd14c1"}, + {file = "fonttools-4.58.4-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:891c5771e8f0094b7c0dc90eda8fc75e72930b32581418f2c285a9feedfd9a68"}, + {file = "fonttools-4.58.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:43ba4d9646045c375d22e3473b7d82b18b31ee2ac715cd94220ffab7bc2d5c1d"}, + {file = "fonttools-4.58.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33d19f16e6d2ffd6669bda574a6589941f6c99a8d5cfb9f464038244c71555de"}, + {file = "fonttools-4.58.4-cp311-cp311-win32.whl", hash = "sha256:b59e5109b907da19dc9df1287454821a34a75f2632a491dd406e46ff432c2a24"}, + {file = "fonttools-4.58.4-cp311-cp311-win_amd64.whl", hash = "sha256:3d471a5b567a0d1648f2e148c9a8bcf00d9ac76eb89e976d9976582044cc2509"}, + {file = "fonttools-4.58.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:462211c0f37a278494e74267a994f6be9a2023d0557aaa9ecbcbfce0f403b5a6"}, + {file = "fonttools-4.58.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:0c7a12fb6f769165547f00fcaa8d0df9517603ae7e04b625e5acb8639809b82d"}, + {file = "fonttools-4.58.4-cp312-cp312-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:2d42c63020a922154add0a326388a60a55504629edc3274bc273cd3806b4659f"}, + {file = "fonttools-4.58.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8f2b4e6fd45edc6805f5f2c355590b092ffc7e10a945bd6a569fc66c1d2ae7aa"}, + {file = "fonttools-4.58.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f155b927f6efb1213a79334e4cb9904d1e18973376ffc17a0d7cd43d31981f1e"}, + {file = 
"fonttools-4.58.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e38f687d5de97c7fb7da3e58169fb5ba349e464e141f83c3c2e2beb91d317816"}, + {file = "fonttools-4.58.4-cp312-cp312-win32.whl", hash = "sha256:636c073b4da9db053aa683db99580cac0f7c213a953b678f69acbca3443c12cc"}, + {file = "fonttools-4.58.4-cp312-cp312-win_amd64.whl", hash = "sha256:82e8470535743409b30913ba2822e20077acf9ea70acec40b10fcf5671dceb58"}, + {file = "fonttools-4.58.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:5f4a64846495c543796fa59b90b7a7a9dff6839bd852741ab35a71994d685c6d"}, + {file = "fonttools-4.58.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e80661793a5d4d7ad132a2aa1eae2e160fbdbb50831a0edf37c7c63b2ed36574"}, + {file = "fonttools-4.58.4-cp313-cp313-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:fe5807fc64e4ba5130f1974c045a6e8d795f3b7fb6debfa511d1773290dbb76b"}, + {file = "fonttools-4.58.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b610b9bef841cb8f4b50472494158b1e347d15cad56eac414c722eda695a6cfd"}, + {file = "fonttools-4.58.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:2daa7f0e213c38f05f054eb5e1730bd0424aebddbeac094489ea1585807dd187"}, + {file = "fonttools-4.58.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:66cccb6c0b944496b7f26450e9a66e997739c513ffaac728d24930df2fd9d35b"}, + {file = "fonttools-4.58.4-cp313-cp313-win32.whl", hash = "sha256:94d2aebb5ca59a5107825520fde596e344652c1f18170ef01dacbe48fa60c889"}, + {file = "fonttools-4.58.4-cp313-cp313-win_amd64.whl", hash = "sha256:b554bd6e80bba582fd326ddab296e563c20c64dca816d5e30489760e0c41529f"}, + {file = "fonttools-4.58.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:ca773fe7812e4e1197ee4e63b9691e89650ab55f679e12ac86052d2fe0d152cd"}, + {file = "fonttools-4.58.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e31289101221910f44245472e02b1a2f7d671c6d06a45c07b354ecb25829ad92"}, + {file = "fonttools-4.58.4-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:90c9e3c01475bb9602cb617f69f02c4ba7ab7784d93f0b0d685e84286f4c1a10"}, + {file = "fonttools-4.58.4-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e00a826f2bc745a010341ac102082fe5e3fb9f0861b90ed9ff32277598813711"}, + {file = "fonttools-4.58.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:bc75e72e9d2a4ad0935c59713bd38679d51c6fefab1eadde80e3ed4c2a11ea84"}, + {file = "fonttools-4.58.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:f57a795e540059ce3de68508acfaaf177899b39c36ef0a2833b2308db98c71f1"}, + {file = "fonttools-4.58.4-cp39-cp39-win32.whl", hash = "sha256:a7d04f64c88b48ede655abcf76f2b2952f04933567884d99be7c89e0a4495131"}, + {file = "fonttools-4.58.4-cp39-cp39-win_amd64.whl", hash = "sha256:5a8bc5dfd425c89b1c38380bc138787b0a830f761b82b37139aa080915503b69"}, + {file = "fonttools-4.58.4-py3-none-any.whl", hash = "sha256:a10ce13a13f26cbb9f37512a4346bb437ad7e002ff6fa966a7ce7ff5ac3528bd"}, + {file = "fonttools-4.58.4.tar.gz", hash = "sha256:928a8009b9884ed3aae17724b960987575155ca23c6f0b8146e400cc9e0d44ba"}, ] [package.extras] @@ -401,14 +479,14 @@ woff = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "zopfli (>=0.1.4)"] [[package]] name = "fsspec" -version = "2025.3.2" +version = "2025.5.1" description = "File-system specification" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "fsspec-2025.3.2-py3-none-any.whl", hash = 
"sha256:2daf8dc3d1dfa65b6aa37748d112773a7a08416f6c70d96b264c96476ecaf711"}, - {file = "fsspec-2025.3.2.tar.gz", hash = "sha256:e52c77ef398680bbd6a98c0e628fbc469491282981209907bbc8aea76a04fdc6"}, + {file = "fsspec-2025.5.1-py3-none-any.whl", hash = "sha256:24d3a2e663d5fc735ab256263c4075f374a174c3410c0b25e5bd1970bceaa462"}, + {file = "fsspec-2025.5.1.tar.gz", hash = "sha256:2e55e47a540b91843b755e83ded97c6e897fa0942b11490113f09e9c443c2475"}, ] [package.extras] @@ -474,67 +552,67 @@ colorama = ">=0.4" [[package]] name = "grpcio" -version = "1.71.0" +version = "1.73.0" description = "HTTP/2-based RPC framework" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "grpcio-1.71.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:c200cb6f2393468142eb50ab19613229dcc7829b5ccee8b658a36005f6669fdd"}, - {file = "grpcio-1.71.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:b2266862c5ad664a380fbbcdbdb8289d71464c42a8c29053820ee78ba0119e5d"}, - {file = "grpcio-1.71.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:0ab8b2864396663a5b0b0d6d79495657ae85fa37dcb6498a2669d067c65c11ea"}, - {file = "grpcio-1.71.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c30f393f9d5ff00a71bb56de4aa75b8fe91b161aeb61d39528db6b768d7eac69"}, - {file = "grpcio-1.71.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f250ff44843d9a0615e350c77f890082102a0318d66a99540f54769c8766ab73"}, - {file = "grpcio-1.71.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e6d8de076528f7c43a2f576bc311799f89d795aa6c9b637377cc2b1616473804"}, - {file = "grpcio-1.71.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:9b91879d6da1605811ebc60d21ab6a7e4bae6c35f6b63a061d61eb818c8168f6"}, - {file = "grpcio-1.71.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f71574afdf944e6652203cd1badcda195b2a27d9c83e6d88dc1ce3cfb73b31a5"}, - {file = "grpcio-1.71.0-cp310-cp310-win32.whl", hash = "sha256:8997d6785e93308f277884ee6899ba63baafa0dfb4729748200fcc537858a509"}, - {file = "grpcio-1.71.0-cp310-cp310-win_amd64.whl", hash = "sha256:7d6ac9481d9d0d129224f6d5934d5832c4b1cddb96b59e7eba8416868909786a"}, - {file = "grpcio-1.71.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:d6aa986318c36508dc1d5001a3ff169a15b99b9f96ef5e98e13522c506b37eef"}, - {file = "grpcio-1.71.0-cp311-cp311-macosx_10_14_universal2.whl", hash = "sha256:d2c170247315f2d7e5798a22358e982ad6eeb68fa20cf7a820bb74c11f0736e7"}, - {file = "grpcio-1.71.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:e6f83a583ed0a5b08c5bc7a3fe860bb3c2eac1f03f1f63e0bc2091325605d2b7"}, - {file = "grpcio-1.71.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4be74ddeeb92cc87190e0e376dbc8fc7736dbb6d3d454f2fa1f5be1dee26b9d7"}, - {file = "grpcio-1.71.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4dd0dfbe4d5eb1fcfec9490ca13f82b089a309dc3678e2edabc144051270a66e"}, - {file = "grpcio-1.71.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a2242d6950dc892afdf9e951ed7ff89473aaf744b7d5727ad56bdaace363722b"}, - {file = "grpcio-1.71.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:0fa05ee31a20456b13ae49ad2e5d585265f71dd19fbd9ef983c28f926d45d0a7"}, - {file = "grpcio-1.71.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3d081e859fb1ebe176de33fc3adb26c7d46b8812f906042705346b314bde32c3"}, - {file = "grpcio-1.71.0-cp311-cp311-win32.whl", hash = "sha256:d6de81c9c00c8a23047136b11794b3584cdc1460ed7cbc10eada50614baa1444"}, - {file = 
"grpcio-1.71.0-cp311-cp311-win_amd64.whl", hash = "sha256:24e867651fc67717b6f896d5f0cac0ec863a8b5fb7d6441c2ab428f52c651c6b"}, - {file = "grpcio-1.71.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:0ff35c8d807c1c7531d3002be03221ff9ae15712b53ab46e2a0b4bb271f38537"}, - {file = "grpcio-1.71.0-cp312-cp312-macosx_10_14_universal2.whl", hash = "sha256:b78a99cd1ece4be92ab7c07765a0b038194ded2e0a26fd654591ee136088d8d7"}, - {file = "grpcio-1.71.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:dc1a1231ed23caac1de9f943d031f1bc38d0f69d2a3b243ea0d664fc1fbd7fec"}, - {file = "grpcio-1.71.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e6beeea5566092c5e3c4896c6d1d307fb46b1d4bdf3e70c8340b190a69198594"}, - {file = "grpcio-1.71.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5170929109450a2c031cfe87d6716f2fae39695ad5335d9106ae88cc32dc84c"}, - {file = "grpcio-1.71.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:5b08d03ace7aca7b2fadd4baf291139b4a5f058805a8327bfe9aece7253b6d67"}, - {file = "grpcio-1.71.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:f903017db76bf9cc2b2d8bdd37bf04b505bbccad6be8a81e1542206875d0e9db"}, - {file = "grpcio-1.71.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:469f42a0b410883185eab4689060a20488a1a0a00f8bbb3cbc1061197b4c5a79"}, - {file = "grpcio-1.71.0-cp312-cp312-win32.whl", hash = "sha256:ad9f30838550695b5eb302add33f21f7301b882937460dd24f24b3cc5a95067a"}, - {file = "grpcio-1.71.0-cp312-cp312-win_amd64.whl", hash = "sha256:652350609332de6dac4ece254e5d7e1ff834e203d6afb769601f286886f6f3a8"}, - {file = "grpcio-1.71.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:cebc1b34ba40a312ab480ccdb396ff3c529377a2fce72c45a741f7215bfe8379"}, - {file = "grpcio-1.71.0-cp313-cp313-macosx_10_14_universal2.whl", hash = "sha256:85da336e3649a3d2171e82f696b5cad2c6231fdd5bad52616476235681bee5b3"}, - {file = "grpcio-1.71.0-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:f9a412f55bb6e8f3bb000e020dbc1e709627dcb3a56f6431fa7076b4c1aab0db"}, - {file = "grpcio-1.71.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:47be9584729534660416f6d2a3108aaeac1122f6b5bdbf9fd823e11fe6fbaa29"}, - {file = "grpcio-1.71.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c9c80ac6091c916db81131d50926a93ab162a7e97e4428ffc186b6e80d6dda4"}, - {file = "grpcio-1.71.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:789d5e2a3a15419374b7b45cd680b1e83bbc1e52b9086e49308e2c0b5bbae6e3"}, - {file = "grpcio-1.71.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:1be857615e26a86d7363e8a163fade914595c81fec962b3d514a4b1e8760467b"}, - {file = "grpcio-1.71.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:a76d39b5fafd79ed604c4be0a869ec3581a172a707e2a8d7a4858cb05a5a7637"}, - {file = "grpcio-1.71.0-cp313-cp313-win32.whl", hash = "sha256:74258dce215cb1995083daa17b379a1a5a87d275387b7ffe137f1d5131e2cfbb"}, - {file = "grpcio-1.71.0-cp313-cp313-win_amd64.whl", hash = "sha256:22c3bc8d488c039a199f7a003a38cb7635db6656fa96437a8accde8322ce2366"}, - {file = "grpcio-1.71.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:c6a0a28450c16809f94e0b5bfe52cabff63e7e4b97b44123ebf77f448534d07d"}, - {file = "grpcio-1.71.0-cp39-cp39-macosx_10_14_universal2.whl", hash = "sha256:a371e6b6a5379d3692cc4ea1cb92754d2a47bdddeee755d3203d1f84ae08e03e"}, - {file = "grpcio-1.71.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:39983a9245d37394fd59de71e88c4b295eb510a3555e0a847d9965088cdbd033"}, - {file = 
"grpcio-1.71.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9182e0063112e55e74ee7584769ec5a0b4f18252c35787f48738627e23a62b97"}, - {file = "grpcio-1.71.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:693bc706c031aeb848849b9d1c6b63ae6bcc64057984bb91a542332b75aa4c3d"}, - {file = "grpcio-1.71.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:20e8f653abd5ec606be69540f57289274c9ca503ed38388481e98fa396ed0b41"}, - {file = "grpcio-1.71.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:8700a2a57771cc43ea295296330daaddc0d93c088f0a35cc969292b6db959bf3"}, - {file = "grpcio-1.71.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d35a95f05a8a2cbe8e02be137740138b3b2ea5f80bd004444e4f9a1ffc511e32"}, - {file = "grpcio-1.71.0-cp39-cp39-win32.whl", hash = "sha256:f9c30c464cb2ddfbc2ddf9400287701270fdc0f14be5f08a1e3939f1e749b455"}, - {file = "grpcio-1.71.0-cp39-cp39-win_amd64.whl", hash = "sha256:63e41b91032f298b3e973b3fa4093cbbc620c875e2da7b93e249d4728b54559a"}, - {file = "grpcio-1.71.0.tar.gz", hash = "sha256:2b85f7820475ad3edec209d3d89a7909ada16caab05d3f2e08a7e8ae3200a55c"}, + {file = "grpcio-1.73.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:d050197eeed50f858ef6c51ab09514856f957dba7b1f7812698260fc9cc417f6"}, + {file = "grpcio-1.73.0-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:ebb8d5f4b0200916fb292a964a4d41210de92aba9007e33d8551d85800ea16cb"}, + {file = "grpcio-1.73.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:c0811331b469e3f15dda5f90ab71bcd9681189a83944fd6dc908e2c9249041ef"}, + {file = "grpcio-1.73.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:12787c791c3993d0ea1cc8bf90393647e9a586066b3b322949365d2772ba965b"}, + {file = "grpcio-1.73.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c17771e884fddf152f2a0df12478e8d02853e5b602a10a9a9f1f52fa02b1d32"}, + {file = "grpcio-1.73.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:275e23d4c428c26b51857bbd95fcb8e528783597207ec592571e4372b300a29f"}, + {file = "grpcio-1.73.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:9ffc972b530bf73ef0f948f799482a1bf12d9b6f33406a8e6387c0ca2098a833"}, + {file = "grpcio-1.73.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ebd8d269df64aff092b2cec5e015d8ae09c7e90888b5c35c24fdca719a2c9f35"}, + {file = "grpcio-1.73.0-cp310-cp310-win32.whl", hash = "sha256:072d8154b8f74300ed362c01d54af8b93200c1a9077aeaea79828d48598514f1"}, + {file = "grpcio-1.73.0-cp310-cp310-win_amd64.whl", hash = "sha256:ce953d9d2100e1078a76a9dc2b7338d5415924dc59c69a15bf6e734db8a0f1ca"}, + {file = "grpcio-1.73.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:51036f641f171eebe5fa7aaca5abbd6150f0c338dab3a58f9111354240fe36ec"}, + {file = "grpcio-1.73.0-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:d12bbb88381ea00bdd92c55aff3da3391fd85bc902c41275c8447b86f036ce0f"}, + {file = "grpcio-1.73.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:483c507c2328ed0e01bc1adb13d1eada05cc737ec301d8e5a8f4a90f387f1790"}, + {file = "grpcio-1.73.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c201a34aa960c962d0ce23fe5f423f97e9d4b518ad605eae6d0a82171809caaa"}, + {file = "grpcio-1.73.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:859f70c8e435e8e1fa060e04297c6818ffc81ca9ebd4940e180490958229a45a"}, + {file = "grpcio-1.73.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e2459a27c6886e7e687e4e407778425f3c6a971fa17a16420227bda39574d64b"}, + {file = 
"grpcio-1.73.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:e0084d4559ee3dbdcce9395e1bc90fdd0262529b32c417a39ecbc18da8074ac7"}, + {file = "grpcio-1.73.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ef5fff73d5f724755693a464d444ee0a448c6cdfd3c1616a9223f736c622617d"}, + {file = "grpcio-1.73.0-cp311-cp311-win32.whl", hash = "sha256:965a16b71a8eeef91fc4df1dc40dc39c344887249174053814f8a8e18449c4c3"}, + {file = "grpcio-1.73.0-cp311-cp311-win_amd64.whl", hash = "sha256:b71a7b4483d1f753bbc11089ff0f6fa63b49c97a9cc20552cded3fcad466d23b"}, + {file = "grpcio-1.73.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:fb9d7c27089d9ba3746f18d2109eb530ef2a37452d2ff50f5a6696cd39167d3b"}, + {file = "grpcio-1.73.0-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:128ba2ebdac41e41554d492b82c34586a90ebd0766f8ebd72160c0e3a57b9155"}, + {file = "grpcio-1.73.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:068ecc415f79408d57a7f146f54cdf9f0acb4b301a52a9e563973dc981e82f3d"}, + {file = "grpcio-1.73.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ddc1cfb2240f84d35d559ade18f69dcd4257dbaa5ba0de1a565d903aaab2968"}, + {file = "grpcio-1.73.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e53007f70d9783f53b41b4cf38ed39a8e348011437e4c287eee7dd1d39d54b2f"}, + {file = "grpcio-1.73.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:4dd8d8d092efede7d6f48d695ba2592046acd04ccf421436dd7ed52677a9ad29"}, + {file = "grpcio-1.73.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:70176093d0a95b44d24baa9c034bb67bfe2b6b5f7ebc2836f4093c97010e17fd"}, + {file = "grpcio-1.73.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:085ebe876373ca095e24ced95c8f440495ed0b574c491f7f4f714ff794bbcd10"}, + {file = "grpcio-1.73.0-cp312-cp312-win32.whl", hash = "sha256:cfc556c1d6aef02c727ec7d0016827a73bfe67193e47c546f7cadd3ee6bf1a60"}, + {file = "grpcio-1.73.0-cp312-cp312-win_amd64.whl", hash = "sha256:bbf45d59d090bf69f1e4e1594832aaf40aa84b31659af3c5e2c3f6a35202791a"}, + {file = "grpcio-1.73.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:da1d677018ef423202aca6d73a8d3b2cb245699eb7f50eb5f74cae15a8e1f724"}, + {file = "grpcio-1.73.0-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:36bf93f6a657f37c131d9dd2c391b867abf1426a86727c3575393e9e11dadb0d"}, + {file = "grpcio-1.73.0-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:d84000367508ade791d90c2bafbd905574b5ced8056397027a77a215d601ba15"}, + {file = "grpcio-1.73.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c98ba1d928a178ce33f3425ff823318040a2b7ef875d30a0073565e5ceb058d9"}, + {file = "grpcio-1.73.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a73c72922dfd30b396a5f25bb3a4590195ee45ecde7ee068acb0892d2900cf07"}, + {file = "grpcio-1.73.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:10e8edc035724aba0346a432060fd192b42bd03675d083c01553cab071a28da5"}, + {file = "grpcio-1.73.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:f5cdc332b503c33b1643b12ea933582c7b081957c8bc2ea4cc4bc58054a09288"}, + {file = "grpcio-1.73.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:07ad7c57233c2109e4ac999cb9c2710c3b8e3f491a73b058b0ce431f31ed8145"}, + {file = "grpcio-1.73.0-cp313-cp313-win32.whl", hash = "sha256:0eb5df4f41ea10bda99a802b2a292d85be28958ede2a50f2beb8c7fc9a738419"}, + {file = "grpcio-1.73.0-cp313-cp313-win_amd64.whl", hash = "sha256:38cf518cc54cd0c47c9539cefa8888549fcc067db0b0c66a46535ca8032020c4"}, + {file = 
"grpcio-1.73.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:1284850607901cfe1475852d808e5a102133461ec9380bc3fc9ebc0686ee8e32"}, + {file = "grpcio-1.73.0-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:0e092a4b28eefb63eec00d09ef33291cd4c3a0875cde29aec4d11d74434d222c"}, + {file = "grpcio-1.73.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:33577fe7febffe8ebad458744cfee8914e0c10b09f0ff073a6b149a84df8ab8f"}, + {file = "grpcio-1.73.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:60813d8a16420d01fa0da1fc7ebfaaa49a7e5051b0337cd48f4f950eb249a08e"}, + {file = "grpcio-1.73.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a9c957dc65e5d474378d7bcc557e9184576605d4b4539e8ead6e351d7ccce20"}, + {file = "grpcio-1.73.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:3902b71407d021163ea93c70c8531551f71ae742db15b66826cf8825707d2908"}, + {file = "grpcio-1.73.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:1dd7fa7276dcf061e2d5f9316604499eea06b1b23e34a9380572d74fe59915a8"}, + {file = "grpcio-1.73.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2d1510c4ea473110cb46a010555f2c1a279d1c256edb276e17fa571ba1e8927c"}, + {file = "grpcio-1.73.0-cp39-cp39-win32.whl", hash = "sha256:d0a1517b2005ba1235a1190b98509264bf72e231215dfeef8db9a5a92868789e"}, + {file = "grpcio-1.73.0-cp39-cp39-win_amd64.whl", hash = "sha256:6228f7eb6d9f785f38b589d49957fca5df3d5b5349e77d2d89b14e390165344c"}, + {file = "grpcio-1.73.0.tar.gz", hash = "sha256:3af4c30918a7f0d39de500d11255f8d9da4f30e94a2033e70fe2a720e184bd8e"}, ] [package.extras] -protobuf = ["grpcio-tools (>=1.71.0)"] +protobuf = ["grpcio-tools (>=1.73.0)"] [[package]] name = "idna" @@ -564,8 +642,10 @@ files = [ ] [package.dependencies] +imageio-ffmpeg = {version = "*", optional = true, markers = "extra == \"ffmpeg\""} numpy = "*" pillow = ">=8.3.2" +psutil = {version = "*", optional = true, markers = "extra == \"ffmpeg\""} [package.extras] all-plugins = ["astropy", "av", "imageio-ffmpeg", "numpy (>2)", "pillow-heif", "psutil", "rawpy", "tifffile"] @@ -585,6 +665,23 @@ rawpy = ["numpy (>2)", "rawpy"] test = ["fsspec[github]", "pytest", "pytest-cov"] tifffile = ["tifffile"] +[[package]] +name = "imageio-ffmpeg" +version = "0.6.0" +description = "FFMPEG wrapper for Python" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "imageio_ffmpeg-0.6.0-py3-none-macosx_10_9_intel.macosx_10_9_x86_64.whl", hash = "sha256:9d2baaf867088508d4a3458e61eeb30e945c4ad8016025545f66c4b5aaef0a61"}, + {file = "imageio_ffmpeg-0.6.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:b1ae3173414b5fc5f538a726c4e48ea97edc0d2cdc11f103afee655c463fa742"}, + {file = "imageio_ffmpeg-0.6.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:1d47bebd83d2c5fc770720d211855f208af8a596c82d17730aa51e815cdee6dc"}, + {file = "imageio_ffmpeg-0.6.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:c7e46fcec401dd990405049d2e2f475e2b397779df2519b544b8aab515195282"}, + {file = "imageio_ffmpeg-0.6.0-py3-none-win32.whl", hash = "sha256:196faa79366b4a82f95c0f4053191d2013f4714a715780f0ad2a68ff37483cc2"}, + {file = "imageio_ffmpeg-0.6.0-py3-none-win_amd64.whl", hash = "sha256:02fa47c83703c37df6bfe4896aab339013f62bf02c5ebf2dce6da56af04ffc0a"}, + {file = "imageio_ffmpeg-0.6.0.tar.gz", hash = "sha256:e2556bed8e005564a9f925bb7afa4002d82770d6b08825078b7697ab88ba1755"}, +] + [[package]] name = "iniconfig" version = "2.1.0" @@ -599,30 +696,36 @@ files = [ [[package]] name = "ir-sim" -version = "2.4.4" +version = "2.5.4" description = 
"IR-SIM is an open-source, lightweight robot simulator based on Python, designed for robotics navigation, control, and learning. This simulator provides a simple and user-friendly framework for simulating robots, sensors, and environments, facilitating the development and testing of robotics algorithms with minimal hardware requirements." optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "ir_sim-2.4.4-py3-none-any.whl", hash = "sha256:09556cbb514f4571c620c500e00fb801b45520e76d691fab99e87c93c23dc371"}, - {file = "ir_sim-2.4.4.tar.gz", hash = "sha256:6663cfa5e021a2cfb72621ffb61f4046c1e5489b81c022842bf0ffc168541164"}, + {file = "ir_sim-2.5.4-py3-none-any.whl", hash = "sha256:a899f6da22ee53fb0ac150c9d2054d3de86e101e029c775a9265d3b49e9c6885"}, + {file = "ir_sim-2.5.4.tar.gz", hash = "sha256:30c17a2b1708f9096e19d60968cf7eb77f563008f52ed4b406462c501f76683c"}, ] [package.dependencies] -imageio = "*" +imageio = [ + {version = "*"}, + {version = "*", extras = ["ffmpeg"], optional = true, markers = "extra == \"all\""}, +] loguru = "*" -matplotlib = "<3.10.0" +matplotlib = "*" numpy = "*" pynput = {version = "*", optional = true, markers = "extra == \"all\""} +pytest = {version = "*", optional = true, markers = "extra == \"all\""} +pytest-cov = {version = "*", optional = true, markers = "extra == \"all\""} pyyaml = "*" scipy = "*" shapely = ">=2.0.3" -tabulate = "*" +tabulate = {version = "*", optional = true, markers = "extra == \"all\""} [package.extras] -all = ["pynput"] -keyboard = ["pynput"] +all = ["imageio[ffmpeg]", "pynput", "pytest", "pytest-cov", "tabulate"] +keyboard = ["pynput", "tabulate"] +test = ["pytest", "pytest-cov"] [[package]] name = "jinja2" @@ -753,14 +856,14 @@ dev = ["Sphinx (==7.2.5)", "colorama (==0.4.5)", "colorama (==0.4.6)", "exceptio [[package]] name = "markdown" -version = "3.8" +version = "3.8.2" description = "Python implementation of John Gruber's Markdown." 
optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "markdown-3.8-py3-none-any.whl", hash = "sha256:794a929b79c5af141ef5ab0f2f642d0f7b1872981250230e72682346f7cc90dc"}, - {file = "markdown-3.8.tar.gz", hash = "sha256:7df81e63f0df5c4b24b7d156eb81e4690595239b7d70937d0409f1b0de319c6f"}, + {file = "markdown-3.8.2-py3-none-any.whl", hash = "sha256:5c83764dbd4e00bdd94d85a19b8d55ccca20fe35b2e678a1422b380324dd5f24"}, + {file = "markdown-3.8.2.tar.gz", hash = "sha256:247b9a70dd12e27f67431ce62523e675b866d254f900c4fe75ce3dda62237c45"}, ] [package.extras] @@ -840,53 +943,46 @@ files = [ [[package]] name = "matplotlib" -version = "3.9.4" +version = "3.10.3" description = "Python plotting package" optional = false -python-versions = ">=3.9" +python-versions = ">=3.10" groups = ["main"] files = [ - {file = "matplotlib-3.9.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:c5fdd7abfb706dfa8d307af64a87f1a862879ec3cd8d0ec8637458f0885b9c50"}, - {file = "matplotlib-3.9.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d89bc4e85e40a71d1477780366c27fb7c6494d293e1617788986f74e2a03d7ff"}, - {file = "matplotlib-3.9.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ddf9f3c26aae695c5daafbf6b94e4c1a30d6cd617ba594bbbded3b33a1fcfa26"}, - {file = "matplotlib-3.9.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18ebcf248030173b59a868fda1fe42397253f6698995b55e81e1f57431d85e50"}, - {file = "matplotlib-3.9.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:974896ec43c672ec23f3f8c648981e8bc880ee163146e0312a9b8def2fac66f5"}, - {file = "matplotlib-3.9.4-cp310-cp310-win_amd64.whl", hash = "sha256:4598c394ae9711cec135639374e70871fa36b56afae17bdf032a345be552a88d"}, - {file = "matplotlib-3.9.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d4dd29641d9fb8bc4492420c5480398dd40a09afd73aebe4eb9d0071a05fbe0c"}, - {file = "matplotlib-3.9.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30e5b22e8bcfb95442bf7d48b0d7f3bdf4a450cbf68986ea45fca3d11ae9d099"}, - {file = "matplotlib-3.9.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2bb0030d1d447fd56dcc23b4c64a26e44e898f0416276cac1ebc25522e0ac249"}, - {file = "matplotlib-3.9.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aca90ed222ac3565d2752b83dbb27627480d27662671e4d39da72e97f657a423"}, - {file = "matplotlib-3.9.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a181b2aa2906c608fcae72f977a4a2d76e385578939891b91c2550c39ecf361e"}, - {file = "matplotlib-3.9.4-cp311-cp311-win_amd64.whl", hash = "sha256:1f6882828231eca17f501c4dcd98a05abb3f03d157fbc0769c6911fe08b6cfd3"}, - {file = "matplotlib-3.9.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:dfc48d67e6661378a21c2983200a654b72b5c5cdbd5d2cf6e5e1ece860f0cc70"}, - {file = "matplotlib-3.9.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:47aef0fab8332d02d68e786eba8113ffd6f862182ea2999379dec9e237b7e483"}, - {file = "matplotlib-3.9.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fba1f52c6b7dc764097f52fd9ab627b90db452c9feb653a59945de16752e965f"}, - {file = "matplotlib-3.9.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:173ac3748acaac21afcc3fa1633924609ba1b87749006bc25051c52c422a5d00"}, - {file = "matplotlib-3.9.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:320edea0cadc07007765e33f878b13b3738ffa9745c5f707705692df70ffe0e0"}, - {file = "matplotlib-3.9.4-cp312-cp312-win_amd64.whl", hash = 
"sha256:a4a4cfc82330b27042a7169533da7991e8789d180dd5b3daeaee57d75cd5a03b"}, - {file = "matplotlib-3.9.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:37eeffeeca3c940985b80f5b9a7b95ea35671e0e7405001f249848d2b62351b6"}, - {file = "matplotlib-3.9.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3e7465ac859ee4abcb0d836137cd8414e7bb7ad330d905abced457217d4f0f45"}, - {file = "matplotlib-3.9.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4c12302c34afa0cf061bea23b331e747e5e554b0fa595c96e01c7b75bc3b858"}, - {file = "matplotlib-3.9.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2b8c97917f21b75e72108b97707ba3d48f171541a74aa2a56df7a40626bafc64"}, - {file = "matplotlib-3.9.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0229803bd7e19271b03cb09f27db76c918c467aa4ce2ae168171bc67c3f508df"}, - {file = "matplotlib-3.9.4-cp313-cp313-win_amd64.whl", hash = "sha256:7c0d8ef442ebf56ff5e206f8083d08252ee738e04f3dc88ea882853a05488799"}, - {file = "matplotlib-3.9.4-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:a04c3b00066a688834356d196136349cb32f5e1003c55ac419e91585168b88fb"}, - {file = "matplotlib-3.9.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:04c519587f6c210626741a1e9a68eefc05966ede24205db8982841826af5871a"}, - {file = "matplotlib-3.9.4-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:308afbf1a228b8b525fcd5cec17f246bbbb63b175a3ef6eb7b4d33287ca0cf0c"}, - {file = "matplotlib-3.9.4-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ddb3b02246ddcffd3ce98e88fed5b238bc5faff10dbbaa42090ea13241d15764"}, - {file = "matplotlib-3.9.4-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8a75287e9cb9eee48cb79ec1d806f75b29c0fde978cb7223a1f4c5848d696041"}, - {file = "matplotlib-3.9.4-cp313-cp313t-win_amd64.whl", hash = "sha256:488deb7af140f0ba86da003e66e10d55ff915e152c78b4b66d231638400b1965"}, - {file = "matplotlib-3.9.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:3c3724d89a387ddf78ff88d2a30ca78ac2b4c89cf37f2db4bd453c34799e933c"}, - {file = "matplotlib-3.9.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d5f0a8430ffe23d7e32cfd86445864ccad141797f7d25b7c41759a5b5d17cfd7"}, - {file = "matplotlib-3.9.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6bb0141a21aef3b64b633dc4d16cbd5fc538b727e4958be82a0e1c92a234160e"}, - {file = "matplotlib-3.9.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:57aa235109e9eed52e2c2949db17da185383fa71083c00c6c143a60e07e0888c"}, - {file = "matplotlib-3.9.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b18c600061477ccfdd1e6fd050c33d8be82431700f3452b297a56d9ed7037abb"}, - {file = "matplotlib-3.9.4-cp39-cp39-win_amd64.whl", hash = "sha256:ef5f2d1b67d2d2145ff75e10f8c008bfbf71d45137c4b648c87193e7dd053eac"}, - {file = "matplotlib-3.9.4-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:44e0ed786d769d85bc787b0606a53f2d8d2d1d3c8a2608237365e9121c1a338c"}, - {file = "matplotlib-3.9.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:09debb9ce941eb23ecdbe7eab972b1c3e0276dcf01688073faff7b0f61d6c6ca"}, - {file = "matplotlib-3.9.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bcc53cf157a657bfd03afab14774d54ba73aa84d42cfe2480c91bd94873952db"}, - {file = "matplotlib-3.9.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:ad45da51be7ad02387801fd154ef74d942f49fe3fcd26a64c94842ba7ec0d865"}, - {file = "matplotlib-3.9.4.tar.gz", hash = 
"sha256:1e00e8be7393cbdc6fedfa8a6fba02cf3e83814b285db1c60b906a023ba41bc3"}, + {file = "matplotlib-3.10.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:213fadd6348d106ca7db99e113f1bea1e65e383c3ba76e8556ba4a3054b65ae7"}, + {file = "matplotlib-3.10.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d3bec61cb8221f0ca6313889308326e7bb303d0d302c5cc9e523b2f2e6c73deb"}, + {file = "matplotlib-3.10.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c21ae75651c0231b3ba014b6d5e08fb969c40cdb5a011e33e99ed0c9ea86ecb"}, + {file = "matplotlib-3.10.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a49e39755580b08e30e3620efc659330eac5d6534ab7eae50fa5e31f53ee4e30"}, + {file = "matplotlib-3.10.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cf4636203e1190871d3a73664dea03d26fb019b66692cbfd642faafdad6208e8"}, + {file = "matplotlib-3.10.3-cp310-cp310-win_amd64.whl", hash = "sha256:fd5641a9bb9d55f4dd2afe897a53b537c834b9012684c8444cc105895c8c16fd"}, + {file = "matplotlib-3.10.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:0ef061f74cd488586f552d0c336b2f078d43bc00dc473d2c3e7bfee2272f3fa8"}, + {file = "matplotlib-3.10.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d96985d14dc5f4a736bbea4b9de9afaa735f8a0fc2ca75be2fa9e96b2097369d"}, + {file = "matplotlib-3.10.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7c5f0283da91e9522bdba4d6583ed9d5521566f63729ffb68334f86d0bb98049"}, + {file = "matplotlib-3.10.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fdfa07c0ec58035242bc8b2c8aae37037c9a886370eef6850703d7583e19964b"}, + {file = "matplotlib-3.10.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c0b9849a17bce080a16ebcb80a7b714b5677d0ec32161a2cc0a8e5a6030ae220"}, + {file = "matplotlib-3.10.3-cp311-cp311-win_amd64.whl", hash = "sha256:eef6ed6c03717083bc6d69c2d7ee8624205c29a8e6ea5a31cd3492ecdbaee1e1"}, + {file = "matplotlib-3.10.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:0ab1affc11d1f495ab9e6362b8174a25afc19c081ba5b0775ef00533a4236eea"}, + {file = "matplotlib-3.10.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2a818d8bdcafa7ed2eed74487fdb071c09c1ae24152d403952adad11fa3c65b4"}, + {file = "matplotlib-3.10.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:748ebc3470c253e770b17d8b0557f0aa85cf8c63fd52f1a61af5b27ec0b7ffee"}, + {file = "matplotlib-3.10.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed70453fd99733293ace1aec568255bc51c6361cb0da94fa5ebf0649fdb2150a"}, + {file = "matplotlib-3.10.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dbed9917b44070e55640bd13419de83b4c918e52d97561544814ba463811cbc7"}, + {file = "matplotlib-3.10.3-cp312-cp312-win_amd64.whl", hash = "sha256:cf37d8c6ef1a48829443e8ba5227b44236d7fcaf7647caa3178a4ff9f7a5be05"}, + {file = "matplotlib-3.10.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9f2efccc8dcf2b86fc4ee849eea5dcaecedd0773b30f47980dc0cbeabf26ec84"}, + {file = "matplotlib-3.10.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3ddbba06a6c126e3301c3d272a99dcbe7f6c24c14024e80307ff03791a5f294e"}, + {file = "matplotlib-3.10.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:748302b33ae9326995b238f606e9ed840bf5886ebafcb233775d946aa8107a15"}, + {file = "matplotlib-3.10.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a80fcccbef63302c0efd78042ea3c2436104c5b1a4d3ae20f864593696364ac7"}, + {file = 
"matplotlib-3.10.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:55e46cbfe1f8586adb34f7587c3e4f7dedc59d5226719faf6cb54fc24f2fd52d"}, + {file = "matplotlib-3.10.3-cp313-cp313-win_amd64.whl", hash = "sha256:151d89cb8d33cb23345cd12490c76fd5d18a56581a16d950b48c6ff19bb2ab93"}, + {file = "matplotlib-3.10.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:c26dd9834e74d164d06433dc7be5d75a1e9890b926b3e57e74fa446e1a62c3e2"}, + {file = "matplotlib-3.10.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:24853dad5b8c84c8c2390fc31ce4858b6df504156893292ce8092d190ef8151d"}, + {file = "matplotlib-3.10.3-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68f7878214d369d7d4215e2a9075fef743be38fa401d32e6020bab2dfabaa566"}, + {file = "matplotlib-3.10.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6929fc618cb6db9cb75086f73b3219bbb25920cb24cee2ea7a12b04971a4158"}, + {file = "matplotlib-3.10.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6c7818292a5cc372a2dc4c795e5c356942eb8350b98ef913f7fda51fe175ac5d"}, + {file = "matplotlib-3.10.3-cp313-cp313t-win_amd64.whl", hash = "sha256:4f23ffe95c5667ef8a2b56eea9b53db7f43910fa4a2d5472ae0f72b64deab4d5"}, + {file = "matplotlib-3.10.3-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:86ab63d66bbc83fdb6733471d3bff40897c1e9921cba112accd748eee4bce5e4"}, + {file = "matplotlib-3.10.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:a48f9c08bf7444b5d2391a83e75edb464ccda3c380384b36532a0962593a1751"}, + {file = "matplotlib-3.10.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cb73d8aa75a237457988f9765e4dfe1c0d2453c5ca4eabc897d4309672c8e014"}, + {file = "matplotlib-3.10.3.tar.gz", hash = "sha256:2f82d2c5bb7ae93aaaa4cd42aca65d76ce6376f83304fa3a630b569aca274df0"}, ] [package.dependencies] @@ -901,7 +997,7 @@ pyparsing = ">=2.3.1" python-dateutil = ">=2.7" [package.extras] -dev = ["meson-python (>=0.13.1,<0.17.0)", "numpy (>=1.25)", "pybind11 (>=2.6,!=2.13.3)", "setuptools (>=64)", "setuptools_scm (>=7)"] +dev = ["meson-python (>=0.13.1,<0.17.0)", "pybind11 (>=2.13.2,!=2.13.3)", "setuptools (>=64)", "setuptools_scm (>=7)"] [[package]] name = "mergedeep" @@ -948,14 +1044,14 @@ min-versions = ["babel (==2.9.0)", "click (==7.0)", "colorama (==0.4)", "ghp-imp [[package]] name = "mkdocs-autorefs" -version = "1.4.1" +version = "1.4.2" description = "Automatically link across pages in MkDocs." 
optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "mkdocs_autorefs-1.4.1-py3-none-any.whl", hash = "sha256:9793c5ac06a6ebbe52ec0f8439256e66187badf4b5334b5fde0b128ec134df4f"}, - {file = "mkdocs_autorefs-1.4.1.tar.gz", hash = "sha256:4b5b6235a4becb2b10425c2fa191737e415b37aa3418919db33e5d774c9db079"}, + {file = "mkdocs_autorefs-1.4.2-py3-none-any.whl", hash = "sha256:83d6d777b66ec3c372a1aad4ae0cf77c243ba5bcda5bf0c6b8a2c5e7a3d89f13"}, + {file = "mkdocs_autorefs-1.4.2.tar.gz", hash = "sha256:e2ebe1abd2b67d597ed19378c0fff84d73d1dbce411fce7a7cc6f161888b6749"}, ] [package.dependencies] @@ -982,14 +1078,14 @@ pyyaml = ">=5.1" [[package]] name = "mkdocs-material" -version = "9.6.12" +version = "9.6.14" description = "Documentation that simply works" optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "mkdocs_material-9.6.12-py3-none-any.whl", hash = "sha256:92b4fbdc329e4febc267ca6e2c51e8501fa97b2225c5f4deb4d4e43550f8e61e"}, - {file = "mkdocs_material-9.6.12.tar.gz", hash = "sha256:add6a6337b29f9ea7912cb1efc661de2c369060b040eb5119855d794ea85b473"}, + {file = "mkdocs_material-9.6.14-py3-none-any.whl", hash = "sha256:3b9cee6d3688551bf7a8e8f41afda97a3c39a12f0325436d76c86706114b721b"}, + {file = "mkdocs_material-9.6.14.tar.gz", hash = "sha256:39d795e90dce6b531387c255bd07e866e027828b7346d3eba5ac3de265053754"}, ] [package.dependencies] @@ -1050,14 +1146,14 @@ python-legacy = ["mkdocstrings-python-legacy (>=0.2.1)"] [[package]] name = "mkdocstrings-python" -version = "1.16.10" +version = "1.16.12" description = "A Python handler for mkdocstrings." optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "mkdocstrings_python-1.16.10-py3-none-any.whl", hash = "sha256:63bb9f01f8848a644bdb6289e86dc38ceddeaa63ecc2e291e3b2ca52702a6643"}, - {file = "mkdocstrings_python-1.16.10.tar.gz", hash = "sha256:f9eedfd98effb612ab4d0ed6dd2b73aff6eba5215e0a65cea6d877717f75502e"}, + {file = "mkdocstrings_python-1.16.12-py3-none-any.whl", hash = "sha256:22ded3a63b3d823d57457a70ff9860d5a4de9e8b1e482876fc9baabaf6f5f374"}, + {file = "mkdocstrings_python-1.16.12.tar.gz", hash = "sha256:9b9eaa066e0024342d433e332a41095c4e429937024945fea511afe58f63175d"}, ] [package.dependencies] @@ -1106,67 +1202,67 @@ test = ["pytest (>=7.2)", "pytest-cov (>=4.0)"] [[package]] name = "numpy" -version = "2.2.5" +version = "2.2.6" description = "Fundamental package for array computing in Python" optional = false python-versions = ">=3.10" groups = ["main"] files = [ - {file = "numpy-2.2.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1f4a922da1729f4c40932b2af4fe84909c7a6e167e6e99f71838ce3a29f3fe26"}, - {file = "numpy-2.2.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b6f91524d31b34f4a5fee24f5bc16dcd1491b668798b6d85585d836c1e633a6a"}, - {file = "numpy-2.2.5-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:19f4718c9012e3baea91a7dba661dcab2451cda2550678dc30d53acb91a7290f"}, - {file = "numpy-2.2.5-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:eb7fd5b184e5d277afa9ec0ad5e4eb562ecff541e7f60e69ee69c8d59e9aeaba"}, - {file = "numpy-2.2.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6413d48a9be53e183eb06495d8e3b006ef8f87c324af68241bbe7a39e8ff54c3"}, - {file = "numpy-2.2.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7451f92eddf8503c9b8aa4fe6aa7e87fd51a29c2cfc5f7dbd72efde6c65acf57"}, - {file = "numpy-2.2.5-cp310-cp310-musllinux_1_2_aarch64.whl", hash = 
"sha256:0bcb1d057b7571334139129b7f941588f69ce7c4ed15a9d6162b2ea54ded700c"}, - {file = "numpy-2.2.5-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:36ab5b23915887543441efd0417e6a3baa08634308894316f446027611b53bf1"}, - {file = "numpy-2.2.5-cp310-cp310-win32.whl", hash = "sha256:422cc684f17bc963da5f59a31530b3936f57c95a29743056ef7a7903a5dbdf88"}, - {file = "numpy-2.2.5-cp310-cp310-win_amd64.whl", hash = "sha256:e4f0b035d9d0ed519c813ee23e0a733db81ec37d2e9503afbb6e54ccfdee0fa7"}, - {file = "numpy-2.2.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c42365005c7a6c42436a54d28c43fe0e01ca11eb2ac3cefe796c25a5f98e5e9b"}, - {file = "numpy-2.2.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:498815b96f67dc347e03b719ef49c772589fb74b8ee9ea2c37feae915ad6ebda"}, - {file = "numpy-2.2.5-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:6411f744f7f20081b1b4e7112e0f4c9c5b08f94b9f086e6f0adf3645f85d3a4d"}, - {file = "numpy-2.2.5-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:9de6832228f617c9ef45d948ec1cd8949c482238d68b2477e6f642c33a7b0a54"}, - {file = "numpy-2.2.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:369e0d4647c17c9363244f3468f2227d557a74b6781cb62ce57cf3ef5cc7c610"}, - {file = "numpy-2.2.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:262d23f383170f99cd9191a7c85b9a50970fe9069b2f8ab5d786eca8a675d60b"}, - {file = "numpy-2.2.5-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:aa70fdbdc3b169d69e8c59e65c07a1c9351ceb438e627f0fdcd471015cd956be"}, - {file = "numpy-2.2.5-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:37e32e985f03c06206582a7323ef926b4e78bdaa6915095ef08070471865b906"}, - {file = "numpy-2.2.5-cp311-cp311-win32.whl", hash = "sha256:f5045039100ed58fa817a6227a356240ea1b9a1bc141018864c306c1a16d4175"}, - {file = "numpy-2.2.5-cp311-cp311-win_amd64.whl", hash = "sha256:b13f04968b46ad705f7c8a80122a42ae8f620536ea38cf4bdd374302926424dd"}, - {file = "numpy-2.2.5-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ee461a4eaab4f165b68780a6a1af95fb23a29932be7569b9fab666c407969051"}, - {file = "numpy-2.2.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ec31367fd6a255dc8de4772bd1658c3e926d8e860a0b6e922b615e532d320ddc"}, - {file = "numpy-2.2.5-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:47834cde750d3c9f4e52c6ca28a7361859fcaf52695c7dc3cc1a720b8922683e"}, - {file = "numpy-2.2.5-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:2c1a1c6ccce4022383583a6ded7bbcda22fc635eb4eb1e0a053336425ed36dfa"}, - {file = "numpy-2.2.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d75f338f5f79ee23548b03d801d28a505198297534f62416391857ea0479571"}, - {file = "numpy-2.2.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a801fef99668f309b88640e28d261991bfad9617c27beda4a3aec4f217ea073"}, - {file = "numpy-2.2.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:abe38cd8381245a7f49967a6010e77dbf3680bd3627c0fe4362dd693b404c7f8"}, - {file = "numpy-2.2.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:5a0ac90e46fdb5649ab6369d1ab6104bfe5854ab19b645bf5cda0127a13034ae"}, - {file = "numpy-2.2.5-cp312-cp312-win32.whl", hash = "sha256:0cd48122a6b7eab8f06404805b1bd5856200e3ed6f8a1b9a194f9d9054631beb"}, - {file = "numpy-2.2.5-cp312-cp312-win_amd64.whl", hash = "sha256:ced69262a8278547e63409b2653b372bf4baff0870c57efa76c5703fd6543282"}, - {file = "numpy-2.2.5-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:059b51b658f4414fff78c6d7b1b4e18283ab5fa56d270ff212d5ba0c561846f4"}, - 
{file = "numpy-2.2.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:47f9ed103af0bc63182609044b0490747e03bd20a67e391192dde119bf43d52f"}, - {file = "numpy-2.2.5-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:261a1ef047751bb02f29dfe337230b5882b54521ca121fc7f62668133cb119c9"}, - {file = "numpy-2.2.5-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:4520caa3807c1ceb005d125a75e715567806fed67e315cea619d5ec6e75a4191"}, - {file = "numpy-2.2.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d14b17b9be5f9c9301f43d2e2a4886a33b53f4e6fdf9ca2f4cc60aeeee76372"}, - {file = "numpy-2.2.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ba321813a00e508d5421104464510cc962a6f791aa2fca1c97b1e65027da80d"}, - {file = "numpy-2.2.5-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4cbdef3ddf777423060c6f81b5694bad2dc9675f110c4b2a60dc0181543fac7"}, - {file = "numpy-2.2.5-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:54088a5a147ab71a8e7fdfd8c3601972751ded0739c6b696ad9cb0343e21ab73"}, - {file = "numpy-2.2.5-cp313-cp313-win32.whl", hash = "sha256:c8b82a55ef86a2d8e81b63da85e55f5537d2157165be1cb2ce7cfa57b6aef38b"}, - {file = "numpy-2.2.5-cp313-cp313-win_amd64.whl", hash = "sha256:d8882a829fd779f0f43998e931c466802a77ca1ee0fe25a3abe50278616b1471"}, - {file = "numpy-2.2.5-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:e8b025c351b9f0e8b5436cf28a07fa4ac0204d67b38f01433ac7f9b870fa38c6"}, - {file = "numpy-2.2.5-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:8dfa94b6a4374e7851bbb6f35e6ded2120b752b063e6acdd3157e4d2bb922eba"}, - {file = "numpy-2.2.5-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:97c8425d4e26437e65e1d189d22dff4a079b747ff9c2788057bfb8114ce1e133"}, - {file = "numpy-2.2.5-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:352d330048c055ea6db701130abc48a21bec690a8d38f8284e00fab256dc1376"}, - {file = "numpy-2.2.5-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b4c0773b6ada798f51f0f8e30c054d32304ccc6e9c5d93d46cb26f3d385ab19"}, - {file = "numpy-2.2.5-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:55f09e00d4dccd76b179c0f18a44f041e5332fd0e022886ba1c0bbf3ea4a18d0"}, - {file = "numpy-2.2.5-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:02f226baeefa68f7d579e213d0f3493496397d8f1cff5e2b222af274c86a552a"}, - {file = "numpy-2.2.5-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:c26843fd58f65da9491165072da2cccc372530681de481ef670dcc8e27cfb066"}, - {file = "numpy-2.2.5-cp313-cp313t-win32.whl", hash = "sha256:1a161c2c79ab30fe4501d5a2bbfe8b162490757cf90b7f05be8b80bc02f7bb8e"}, - {file = "numpy-2.2.5-cp313-cp313t-win_amd64.whl", hash = "sha256:d403c84991b5ad291d3809bace5e85f4bbf44a04bdc9a88ed2bb1807b3360bb8"}, - {file = "numpy-2.2.5-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:b4ea7e1cff6784e58fe281ce7e7f05036b3e1c89c6f922a6bfbc0a7e8768adbe"}, - {file = "numpy-2.2.5-pp310-pypy310_pp73-macosx_14_0_x86_64.whl", hash = "sha256:d7543263084a85fbc09c704b515395398d31d6395518446237eac219eab9e55e"}, - {file = "numpy-2.2.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0255732338c4fdd00996c0421884ea8a3651eea555c3a56b84892b66f696eb70"}, - {file = "numpy-2.2.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:d2e3bdadaba0e040d1e7ab39db73e0afe2c74ae277f5614dad53eadbecbbb169"}, - {file = "numpy-2.2.5.tar.gz", hash = "sha256:a9c0d994680cd991b1cb772e8b297340085466a6fe964bc9d4e80f5e2f43c291"}, + {file = 
"numpy-2.2.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b412caa66f72040e6d268491a59f2c43bf03eb6c96dd8f0307829feb7fa2b6fb"}, + {file = "numpy-2.2.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8e41fd67c52b86603a91c1a505ebaef50b3314de0213461c7a6e99c9a3beff90"}, + {file = "numpy-2.2.6-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:37e990a01ae6ec7fe7fa1c26c55ecb672dd98b19c3d0e1d1f326fa13cb38d163"}, + {file = "numpy-2.2.6-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:5a6429d4be8ca66d889b7cf70f536a397dc45ba6faeb5f8c5427935d9592e9cf"}, + {file = "numpy-2.2.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:efd28d4e9cd7d7a8d39074a4d44c63eda73401580c5c76acda2ce969e0a38e83"}, + {file = "numpy-2.2.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc7b73d02efb0e18c000e9ad8b83480dfcd5dfd11065997ed4c6747470ae8915"}, + {file = "numpy-2.2.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:74d4531beb257d2c3f4b261bfb0fc09e0f9ebb8842d82a7b4209415896adc680"}, + {file = "numpy-2.2.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8fc377d995680230e83241d8a96def29f204b5782f371c532579b4f20607a289"}, + {file = "numpy-2.2.6-cp310-cp310-win32.whl", hash = "sha256:b093dd74e50a8cba3e873868d9e93a85b78e0daf2e98c6797566ad8044e8363d"}, + {file = "numpy-2.2.6-cp310-cp310-win_amd64.whl", hash = "sha256:f0fd6321b839904e15c46e0d257fdd101dd7f530fe03fd6359c1ea63738703f3"}, + {file = "numpy-2.2.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f9f1adb22318e121c5c69a09142811a201ef17ab257a1e66ca3025065b7f53ae"}, + {file = "numpy-2.2.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c820a93b0255bc360f53eca31a0e676fd1101f673dda8da93454a12e23fc5f7a"}, + {file = "numpy-2.2.6-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:3d70692235e759f260c3d837193090014aebdf026dfd167834bcba43e30c2a42"}, + {file = "numpy-2.2.6-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:481b49095335f8eed42e39e8041327c05b0f6f4780488f61286ed3c01368d491"}, + {file = "numpy-2.2.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b64d8d4d17135e00c8e346e0a738deb17e754230d7e0810ac5012750bbd85a5a"}, + {file = "numpy-2.2.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba10f8411898fc418a521833e014a77d3ca01c15b0c6cdcce6a0d2897e6dbbdf"}, + {file = "numpy-2.2.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:bd48227a919f1bafbdda0583705e547892342c26fb127219d60a5c36882609d1"}, + {file = "numpy-2.2.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9551a499bf125c1d4f9e250377c1ee2eddd02e01eac6644c080162c0c51778ab"}, + {file = "numpy-2.2.6-cp311-cp311-win32.whl", hash = "sha256:0678000bb9ac1475cd454c6b8c799206af8107e310843532b04d49649c717a47"}, + {file = "numpy-2.2.6-cp311-cp311-win_amd64.whl", hash = "sha256:e8213002e427c69c45a52bbd94163084025f533a55a59d6f9c5b820774ef3303"}, + {file = "numpy-2.2.6-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:41c5a21f4a04fa86436124d388f6ed60a9343a6f767fced1a8a71c3fbca038ff"}, + {file = "numpy-2.2.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:de749064336d37e340f640b05f24e9e3dd678c57318c7289d222a8a2f543e90c"}, + {file = "numpy-2.2.6-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:894b3a42502226a1cac872f840030665f33326fc3dac8e57c607905773cdcde3"}, + {file = "numpy-2.2.6-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:71594f7c51a18e728451bb50cc60a3ce4e6538822731b2933209a1f3614e9282"}, + {file = 
"numpy-2.2.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f2618db89be1b4e05f7a1a847a9c1c0abd63e63a1607d892dd54668dd92faf87"}, + {file = "numpy-2.2.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd83c01228a688733f1ded5201c678f0c53ecc1006ffbc404db9f7a899ac6249"}, + {file = "numpy-2.2.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:37c0ca431f82cd5fa716eca9506aefcabc247fb27ba69c5062a6d3ade8cf8f49"}, + {file = "numpy-2.2.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fe27749d33bb772c80dcd84ae7e8df2adc920ae8297400dabec45f0dedb3f6de"}, + {file = "numpy-2.2.6-cp312-cp312-win32.whl", hash = "sha256:4eeaae00d789f66c7a25ac5f34b71a7035bb474e679f410e5e1a94deb24cf2d4"}, + {file = "numpy-2.2.6-cp312-cp312-win_amd64.whl", hash = "sha256:c1f9540be57940698ed329904db803cf7a402f3fc200bfe599334c9bd84a40b2"}, + {file = "numpy-2.2.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0811bb762109d9708cca4d0b13c4f67146e3c3b7cf8d34018c722adb2d957c84"}, + {file = "numpy-2.2.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:287cc3162b6f01463ccd86be154f284d0893d2b3ed7292439ea97eafa8170e0b"}, + {file = "numpy-2.2.6-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:f1372f041402e37e5e633e586f62aa53de2eac8d98cbfb822806ce4bbefcb74d"}, + {file = "numpy-2.2.6-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:55a4d33fa519660d69614a9fad433be87e5252f4b03850642f88993f7b2ca566"}, + {file = "numpy-2.2.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f92729c95468a2f4f15e9bb94c432a9229d0d50de67304399627a943201baa2f"}, + {file = "numpy-2.2.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1bc23a79bfabc5d056d106f9befb8d50c31ced2fbc70eedb8155aec74a45798f"}, + {file = "numpy-2.2.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e3143e4451880bed956e706a3220b4e5cf6172ef05fcc397f6f36a550b1dd868"}, + {file = "numpy-2.2.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b4f13750ce79751586ae2eb824ba7e1e8dba64784086c98cdbbcc6a42112ce0d"}, + {file = "numpy-2.2.6-cp313-cp313-win32.whl", hash = "sha256:5beb72339d9d4fa36522fc63802f469b13cdbe4fdab4a288f0c441b74272ebfd"}, + {file = "numpy-2.2.6-cp313-cp313-win_amd64.whl", hash = "sha256:b0544343a702fa80c95ad5d3d608ea3599dd54d4632df855e4c8d24eb6ecfa1c"}, + {file = "numpy-2.2.6-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0bca768cd85ae743b2affdc762d617eddf3bcf8724435498a1e80132d04879e6"}, + {file = "numpy-2.2.6-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:fc0c5673685c508a142ca65209b4e79ed6740a4ed6b2267dbba90f34b0b3cfda"}, + {file = "numpy-2.2.6-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:5bd4fc3ac8926b3819797a7c0e2631eb889b4118a9898c84f585a54d475b7e40"}, + {file = "numpy-2.2.6-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:fee4236c876c4e8369388054d02d0e9bb84821feb1a64dd59e137e6511a551f8"}, + {file = "numpy-2.2.6-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e1dda9c7e08dc141e0247a5b8f49cf05984955246a327d4c48bda16821947b2f"}, + {file = "numpy-2.2.6-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f447e6acb680fd307f40d3da4852208af94afdfab89cf850986c3ca00562f4fa"}, + {file = "numpy-2.2.6-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:389d771b1623ec92636b0786bc4ae56abafad4a4c513d36a55dce14bd9ce8571"}, + {file = "numpy-2.2.6-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8e9ace4a37db23421249ed236fdcdd457d671e25146786dfc96835cd951aa7c1"}, + 
{file = "numpy-2.2.6-cp313-cp313t-win32.whl", hash = "sha256:038613e9fb8c72b0a41f025a7e4c3f0b7a1b5d768ece4796b674c8f3fe13efff"}, + {file = "numpy-2.2.6-cp313-cp313t-win_amd64.whl", hash = "sha256:6031dd6dfecc0cf9f668681a37648373bddd6421fff6c66ec1624eed0180ee06"}, + {file = "numpy-2.2.6-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:0b605b275d7bd0c640cad4e5d30fa701a8d59302e127e5f79138ad62762c3e3d"}, + {file = "numpy-2.2.6-pp310-pypy310_pp73-macosx_14_0_x86_64.whl", hash = "sha256:7befc596a7dc9da8a337f79802ee8adb30a552a94f792b9c9d18c840055907db"}, + {file = "numpy-2.2.6-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce47521a4754c8f4593837384bd3424880629f718d87c5d44f8ed763edd63543"}, + {file = "numpy-2.2.6-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:d042d24c90c41b54fd506da306759e06e568864df8ec17ccc17e9e884634fd00"}, + {file = "numpy-2.2.6.tar.gz", hash = "sha256:e29554e2bef54a90aa5cc07da6ce955accb83f21ab5de01a62c8478897b264fd"}, ] [[package]] @@ -1533,14 +1629,14 @@ xmp = ["defusedxml"] [[package]] name = "platformdirs" -version = "4.3.7" +version = "4.3.8" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "platformdirs-4.3.7-py3-none-any.whl", hash = "sha256:a03875334331946f13c549dbd8f4bac7a13a50a895a0eb1e8c6a8ace80d40a94"}, - {file = "platformdirs-4.3.7.tar.gz", hash = "sha256:eb437d586b6a0986388f0d6f74aa0cde27b48d0e3d66843640bfb6bdcdb6e351"}, + {file = "platformdirs-4.3.8-py3-none-any.whl", hash = "sha256:ff7059bb7eb1179e2685604f4aaf157cfd9535242bd23742eadc3c13542139b4"}, + {file = "platformdirs-4.3.8.tar.gz", hash = "sha256:3d512d96e16bcb959a814c9f348431070822a6496326a4be0911c40b5a74c2bc"}, ] [package.extras] @@ -1550,39 +1646,63 @@ type = ["mypy (>=1.14.1)"] [[package]] name = "pluggy" -version = "1.5.0" +version = "1.6.0" description = "plugin and hook calling mechanisms for python" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" groups = ["main"] files = [ - {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, - {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, + {file = "pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746"}, + {file = "pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3"}, ] [package.extras] dev = ["pre-commit", "tox"] -testing = ["pytest", "pytest-benchmark"] +testing = ["coverage", "pytest", "pytest-benchmark"] [[package]] name = "protobuf" -version = "6.30.2" +version = "6.31.1" description = "" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "protobuf-6.30.2-cp310-abi3-win32.whl", hash = "sha256:b12ef7df7b9329886e66404bef5e9ce6a26b54069d7f7436a0853ccdeb91c103"}, - {file = "protobuf-6.30.2-cp310-abi3-win_amd64.whl", hash = "sha256:7653c99774f73fe6b9301b87da52af0e69783a2e371e8b599b3e9cb4da4b12b9"}, - {file = "protobuf-6.30.2-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:0eb523c550a66a09a0c20f86dd554afbf4d32b02af34ae53d93268c1f73bc65b"}, - {file = "protobuf-6.30.2-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:50f32cc9fd9cb09c783ebc275611b4f19dfdfb68d1ee55d2f0c7fa040df96815"}, - {file = "protobuf-6.30.2-cp39-abi3-manylinux2014_x86_64.whl", hash = 
"sha256:4f6c687ae8efae6cf6093389a596548214467778146b7245e886f35e1485315d"}, - {file = "protobuf-6.30.2-cp39-cp39-win32.whl", hash = "sha256:524afedc03b31b15586ca7f64d877a98b184f007180ce25183d1a5cb230ee72b"}, - {file = "protobuf-6.30.2-cp39-cp39-win_amd64.whl", hash = "sha256:acec579c39c88bd8fbbacab1b8052c793efe83a0a5bd99db4a31423a25c0a0e2"}, - {file = "protobuf-6.30.2-py3-none-any.whl", hash = "sha256:ae86b030e69a98e08c77beab574cbcb9fff6d031d57209f574a5aea1445f4b51"}, - {file = "protobuf-6.30.2.tar.gz", hash = "sha256:35c859ae076d8c56054c25b59e5e59638d86545ed6e2b6efac6be0b6ea3ba048"}, + {file = "protobuf-6.31.1-cp310-abi3-win32.whl", hash = "sha256:7fa17d5a29c2e04b7d90e5e32388b8bfd0e7107cd8e616feef7ed3fa6bdab5c9"}, + {file = "protobuf-6.31.1-cp310-abi3-win_amd64.whl", hash = "sha256:426f59d2964864a1a366254fa703b8632dcec0790d8862d30034d8245e1cd447"}, + {file = "protobuf-6.31.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:6f1227473dc43d44ed644425268eb7c2e488ae245d51c6866d19fe158e207402"}, + {file = "protobuf-6.31.1-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:a40fc12b84c154884d7d4c4ebd675d5b3b5283e155f324049ae396b95ddebc39"}, + {file = "protobuf-6.31.1-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:4ee898bf66f7a8b0bd21bce523814e6fbd8c6add948045ce958b73af7e8878c6"}, + {file = "protobuf-6.31.1-cp39-cp39-win32.whl", hash = "sha256:0414e3aa5a5f3ff423828e1e6a6e907d6c65c1d5b7e6e975793d5590bdeecc16"}, + {file = "protobuf-6.31.1-cp39-cp39-win_amd64.whl", hash = "sha256:8764cf4587791e7564051b35524b72844f845ad0bb011704c3736cce762d8fe9"}, + {file = "protobuf-6.31.1-py3-none-any.whl", hash = "sha256:720a6c7e6b77288b85063569baae8536671b39f15cc22037ec7045658d80489e"}, + {file = "protobuf-6.31.1.tar.gz", hash = "sha256:d8cac4c982f0b957a4dc73a80e2ea24fab08e679c0de9deb835f4a12d69aca9a"}, ] +[[package]] +name = "psutil" +version = "7.0.0" +description = "Cross-platform lib for process and system monitoring in Python. NOTE: the syntax of this script MUST be kept compatible with Python 2.7." 
+optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "psutil-7.0.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:101d71dc322e3cffd7cea0650b09b3d08b8e7c4109dd6809fe452dfd00e58b25"}, + {file = "psutil-7.0.0-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:39db632f6bb862eeccf56660871433e111b6ea58f2caea825571951d4b6aa3da"}, + {file = "psutil-7.0.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fcee592b4c6f146991ca55919ea3d1f8926497a713ed7faaf8225e174581e91"}, + {file = "psutil-7.0.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b1388a4f6875d7e2aff5c4ca1cc16c545ed41dd8bb596cefea80111db353a34"}, + {file = "psutil-7.0.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5f098451abc2828f7dc6b58d44b532b22f2088f4999a937557b603ce72b1993"}, + {file = "psutil-7.0.0-cp36-cp36m-win32.whl", hash = "sha256:84df4eb63e16849689f76b1ffcb36db7b8de703d1bc1fe41773db487621b6c17"}, + {file = "psutil-7.0.0-cp36-cp36m-win_amd64.whl", hash = "sha256:1e744154a6580bc968a0195fd25e80432d3afec619daf145b9e5ba16cc1d688e"}, + {file = "psutil-7.0.0-cp37-abi3-win32.whl", hash = "sha256:ba3fcef7523064a6c9da440fc4d6bd07da93ac726b5733c29027d7dc95b39d99"}, + {file = "psutil-7.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:4cf3d4eb1aa9b348dec30105c55cd9b7d4629285735a102beb4441e38db90553"}, + {file = "psutil-7.0.0.tar.gz", hash = "sha256:7be9c3eba38beccb6495ea33afd982a44074b78f28c434a1f51cc07fd315c456"}, +] + +[package.extras] +dev = ["abi3audit", "black (==24.10.0)", "check-manifest", "coverage", "packaging", "pylint", "pyperf", "pypinfo", "pytest", "pytest-cov", "pytest-xdist", "requests", "rstcheck", "ruff", "setuptools", "sphinx", "sphinx_rtd_theme", "toml-sort", "twine", "virtualenv", "vulture", "wheel"] +test = ["pytest", "pytest-xdist", "setuptools"] + [[package]] name = "pygments" version = "2.19.1" @@ -1638,111 +1758,121 @@ six = "*" [[package]] name = "pyobjc-core" -version = "11.0" +version = "11.1" description = "Python<->ObjC Interoperability Module" optional = false python-versions = ">=3.8" groups = ["main"] markers = "sys_platform == \"darwin\"" files = [ - {file = "pyobjc_core-11.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:10866b3a734d47caf48e456eea0d4815c2c9b21856157db5917b61dee06893a1"}, - {file = "pyobjc_core-11.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:50675c0bb8696fe960a28466f9baf6943df2928a1fd85625d678fa2f428bd0bd"}, - {file = "pyobjc_core-11.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:a03061d4955c62ddd7754224a80cdadfdf17b6b5f60df1d9169a3b1b02923f0b"}, - {file = "pyobjc_core-11.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:c338c1deb7ab2e9436d4175d1127da2eeed4a1b564b3d83b9f3ae4844ba97e86"}, - {file = "pyobjc_core-11.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b4e9dc4296110f251a4033ff3f40320b35873ea7f876bd29a1c9705bb5e08c59"}, - {file = "pyobjc_core-11.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:02406ece449d0f41b31e579e47ca77ced3eb57533df955281bfcecc99da74fba"}, - {file = "pyobjc_core-11.0.tar.gz", hash = "sha256:63bced211cb8a8fb5c8ff46473603da30e51112861bd02c438fbbbc8578d9a70"}, + {file = "pyobjc_core-11.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4c7536f3e94de0a3eae6bb382d75f1219280aa867cdf37beef39d9e7d580173c"}, + {file = "pyobjc_core-11.1-cp311-cp311-macosx_10_9_universal2.whl", hash = 
"sha256:ec36680b5c14e2f73d432b03ba7c1457dc6ca70fa59fd7daea1073f2b4157d33"}, + {file = "pyobjc_core-11.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:765b97dea6b87ec4612b3212258024d8496ea23517c95a1c5f0735f96b7fd529"}, + {file = "pyobjc_core-11.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:18986f83998fbd5d3f56d8a8428b2f3e0754fd15cef3ef786ca0d29619024f2c"}, + {file = "pyobjc_core-11.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:8849e78cfe6595c4911fbba29683decfb0bf57a350aed8a43316976ba6f659d2"}, + {file = "pyobjc_core-11.1-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:8cb9ed17a8d84a312a6e8b665dd22393d48336ea1d8277e7ad20c19a38edf731"}, + {file = "pyobjc_core-11.1-cp314-cp314t-macosx_11_0_universal2.whl", hash = "sha256:f2455683e807f8541f0d83fbba0f5d9a46128ab0d5cc83ea208f0bec759b7f96"}, + {file = "pyobjc_core-11.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:4a99e6558b48b8e47c092051e7b3be05df1c8d0617b62f6fa6a316c01902d157"}, + {file = "pyobjc_core-11.1.tar.gz", hash = "sha256:b63d4d90c5df7e762f34739b39cc55bc63dbcf9fb2fb3f2671e528488c7a87fe"}, ] [[package]] name = "pyobjc-framework-applicationservices" -version = "11.0" +version = "11.1" description = "Wrappers for the framework ApplicationServices on macOS" optional = false python-versions = ">=3.9" groups = ["main"] markers = "sys_platform == \"darwin\"" files = [ - {file = "pyobjc_framework_ApplicationServices-11.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:bc8f34b5b59ffd3c210ae883d794345c1197558ff3da0f5800669cf16435271e"}, - {file = "pyobjc_framework_ApplicationServices-11.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:61a99eef23abb704257310db4f5271137707e184768f6407030c01de4731b67b"}, - {file = "pyobjc_framework_ApplicationServices-11.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:5fbeb425897d6129471d451ec61a29ddd5b1386eb26b1dd49cb313e34616ee21"}, - {file = "pyobjc_framework_ApplicationServices-11.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:59becf3cd87a4f4cedf4be02ff6cf46ed736f5c1123ce629f788aaafad91eff0"}, - {file = "pyobjc_framework_ApplicationServices-11.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:44b466e8745fb49e8ac20f29f2ffd7895b45e97aa63a844b2a80a97c3a34346f"}, - {file = "pyobjc_framework_ApplicationServices-11.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:74963e15a751d1454c1b8060914f116956e3a68f6a117c2163f491609125283b"}, - {file = "pyobjc_framework_applicationservices-11.0.tar.gz", hash = "sha256:d6ea18dfc7d5626a3ecf4ac72d510405c0d3a648ca38cae8db841acdebecf4d2"}, + {file = "pyobjc_framework_applicationservices-11.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:89aa713f16f1de66efd82f3be77c632ad1068e51e0ef0c2b0237ac7c7f580814"}, + {file = "pyobjc_framework_applicationservices-11.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:cf45d15eddae36dec2330a9992fc852476b61c8f529874b9ec2805c768a75482"}, + {file = "pyobjc_framework_applicationservices-11.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:f4a85ccd78bab84f7f05ac65ff9be117839dfc09d48c39edd65c617ed73eb01c"}, + {file = "pyobjc_framework_applicationservices-11.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:385a89f4d0838c97a331e247519d9e9745aa3f7427169d18570e3c664076a63c"}, + {file = "pyobjc_framework_applicationservices-11.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:f480fab20f3005e559c9d06c9a3874a1f1c60dde52c6d28a53ab59b45e79d55f"}, + {file = 
"pyobjc_framework_applicationservices-11.1-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:e8dee91c6a14fd042f98819dc0ac4a182e0e816282565534032f0e544bfab143"}, + {file = "pyobjc_framework_applicationservices-11.1-cp314-cp314t-macosx_11_0_universal2.whl", hash = "sha256:a0ce40a57a9b993793b6f72c4fd93f80618ef54a69d76a1da97b8360a2f3ffc5"}, + {file = "pyobjc_framework_applicationservices-11.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:ba671fc6b695de69b2ed5e350b09cc1806f39352e8ad07635c94ef17730f6fe0"}, + {file = "pyobjc_framework_applicationservices-11.1.tar.gz", hash = "sha256:03fcd8c0c600db98fa8b85eb7b3bc31491701720c795e3f762b54e865138bbaf"}, ] [package.dependencies] -pyobjc-core = ">=11.0" -pyobjc-framework-Cocoa = ">=11.0" -pyobjc-framework-CoreText = ">=11.0" -pyobjc-framework-Quartz = ">=11.0" +pyobjc-core = ">=11.1" +pyobjc-framework-Cocoa = ">=11.1" +pyobjc-framework-CoreText = ">=11.1" +pyobjc-framework-Quartz = ">=11.1" [[package]] name = "pyobjc-framework-cocoa" -version = "11.0" +version = "11.1" description = "Wrappers for the Cocoa frameworks on macOS" optional = false python-versions = ">=3.9" groups = ["main"] markers = "sys_platform == \"darwin\"" files = [ - {file = "pyobjc_framework_Cocoa-11.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:fbc65f260d617d5463c7fb9dbaaffc23c9a4fabfe3b1a50b039b61870b8daefd"}, - {file = "pyobjc_framework_Cocoa-11.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3ea7be6e6dd801b297440de02d312ba3fa7fd3c322db747ae1cb237e975f5d33"}, - {file = "pyobjc_framework_Cocoa-11.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:280a577b83c68175a28b2b7138d1d2d3111f2b2b66c30e86f81a19c2b02eae71"}, - {file = "pyobjc_framework_Cocoa-11.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:15b2bd977ed340074f930f1330f03d42912d5882b697d78bd06f8ebe263ef92e"}, - {file = "pyobjc_framework_Cocoa-11.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:5750001db544e67f2b66f02067d8f0da96bb2ef71732bde104f01b8628f9d7ea"}, - {file = "pyobjc_framework_Cocoa-11.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:ddff25b0755d59873d186e1e07d6aaddb19d55e3ae890d69ff2d9babf8627657"}, - {file = "pyobjc_framework_cocoa-11.0.tar.gz", hash = "sha256:00346a8cb81ad7b017b32ff7bf596000f9faa905807b1bd234644ebd47f692c5"}, + {file = "pyobjc_framework_cocoa-11.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b27a5bdb3ab6cdeb998443ff3fce194ffae5f518c6a079b832dbafc4426937f9"}, + {file = "pyobjc_framework_cocoa-11.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7b9a9b8ba07f5bf84866399e3de2aa311ed1c34d5d2788a995bdbe82cc36cfa0"}, + {file = "pyobjc_framework_cocoa-11.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:806de56f06dfba8f301a244cce289d54877c36b4b19818e3b53150eb7c2424d0"}, + {file = "pyobjc_framework_cocoa-11.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:54e93e1d9b0fc41c032582a6f0834befe1d418d73893968f3f450281b11603da"}, + {file = "pyobjc_framework_cocoa-11.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:fd5245ee1997d93e78b72703be1289d75d88ff6490af94462b564892e9266350"}, + {file = "pyobjc_framework_cocoa-11.1-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:aede53a1afc5433e1e7d66568cc52acceeb171b0a6005407a42e8e82580b4fc0"}, + {file = "pyobjc_framework_cocoa-11.1-cp314-cp314t-macosx_11_0_universal2.whl", hash = "sha256:1b5de4e1757bb65689d6dc1f8d8717de9ec8587eb0c4831c134f13aba29f9b71"}, + {file = "pyobjc_framework_cocoa-11.1-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:bbee71eeb93b1b31ffbac8560b59a0524a8a4b90846a260d2c4f2188f3d4c721"}, + {file = "pyobjc_framework_cocoa-11.1.tar.gz", hash = "sha256:87df76b9b73e7ca699a828ff112564b59251bb9bbe72e610e670a4dc9940d038"}, ] [package.dependencies] -pyobjc-core = ">=11.0" +pyobjc-core = ">=11.1" [[package]] name = "pyobjc-framework-coretext" -version = "11.0" +version = "11.1" description = "Wrappers for the framework CoreText on macOS" optional = false python-versions = ">=3.9" groups = ["main"] markers = "sys_platform == \"darwin\"" files = [ - {file = "pyobjc_framework_CoreText-11.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:6939b4ea745b349b5c964823a2071f155f5defdc9b9fc3a13f036d859d7d0439"}, - {file = "pyobjc_framework_CoreText-11.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:56a4889858308b0d9f147d568b4d91c441cc0ffd332497cb4f709bb1990450c1"}, - {file = "pyobjc_framework_CoreText-11.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:fb90e7f370b3fd7cb2fb442e3dc63fedf0b4af6908db1c18df694d10dc94669d"}, - {file = "pyobjc_framework_CoreText-11.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7947f755782456bd663e0b00c7905eeffd10f839f0bf2af031f68ded6a1ea360"}, - {file = "pyobjc_framework_CoreText-11.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:5356116bae33ec49f1f212c301378a7d08000440a2d6a7281aab351945528ab9"}, - {file = "pyobjc_framework_CoreText-11.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:4a76e1307747f2ee8180d38844cd62b8bb1701b4203d9234cc41f6603d4ae654"}, - {file = "pyobjc_framework_coretext-11.0.tar.gz", hash = "sha256:a68437153e627847e3898754dd3f13ae0cb852246b016a91f9c9cbccb9f91a43"}, + {file = "pyobjc_framework_coretext-11.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:515be6beb48c084ee413c00c4e9fbd6e730c1b8a24270f4c618fc6c7ba0011ce"}, + {file = "pyobjc_framework_coretext-11.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:b4f4d2d2a6331fa64465247358d7aafce98e4fb654b99301a490627a073d021e"}, + {file = "pyobjc_framework_coretext-11.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:1597bf7234270ee1b9963bf112e9061050d5fb8e1384b3f50c11bde2fe2b1570"}, + {file = "pyobjc_framework_coretext-11.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:37e051e8f12a0f47a81b8efc8c902156eb5bc3d8123c43e5bd4cebd24c222228"}, + {file = "pyobjc_framework_coretext-11.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:56a3a02202e0d50be3c43e781c00f9f1859ab9b73a8342ff56260b908e911e37"}, + {file = "pyobjc_framework_coretext-11.1-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:15650ba99692d00953e91e53118c11636056a22c90d472020f7ba31500577bf5"}, + {file = "pyobjc_framework_coretext-11.1-cp314-cp314t-macosx_11_0_universal2.whl", hash = "sha256:fb27f66a56660c31bb956191d64b85b95bac99cfb833f6e99622ca0ac4b3ba12"}, + {file = "pyobjc_framework_coretext-11.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7fee99a1ac96e3f70d482731bc39a546da82a58f87fa9f0e2b784a5febaff33d"}, + {file = "pyobjc_framework_coretext-11.1.tar.gz", hash = "sha256:a29bbd5d85c77f46a8ee81d381b847244c88a3a5a96ac22f509027ceceaffaf6"}, ] [package.dependencies] -pyobjc-core = ">=11.0" -pyobjc-framework-Cocoa = ">=11.0" -pyobjc-framework-Quartz = ">=11.0" +pyobjc-core = ">=11.1" +pyobjc-framework-Cocoa = ">=11.1" +pyobjc-framework-Quartz = ">=11.1" [[package]] name = "pyobjc-framework-quartz" -version = "11.0" +version = "11.1" description = "Wrappers for the Quartz frameworks on macOS" optional = false python-versions = ">=3.9" groups = ["main"] 
markers = "sys_platform == \"darwin\"" files = [ - {file = "pyobjc_framework_Quartz-11.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:da3ab13c9f92361959b41b0ad4cdd41ae872f90a6d8c58a9ed699bc08ab1c45c"}, - {file = "pyobjc_framework_Quartz-11.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d251696bfd8e8ef72fbc90eb29fec95cb9d1cc409008a183d5cc3246130ae8c2"}, - {file = "pyobjc_framework_Quartz-11.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:cb4a9f2d9d580ea15e25e6b270f47681afb5689cafc9e25712445ce715bcd18e"}, - {file = "pyobjc_framework_Quartz-11.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:973b4f9b8ab844574461a038bd5269f425a7368d6e677e3cc81fcc9b27b65498"}, - {file = "pyobjc_framework_Quartz-11.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:66ab58d65348863b8707e63b2ec5cdc54569ee8189d1af90d52f29f5fdf6272c"}, - {file = "pyobjc_framework_Quartz-11.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1032f63f2a4ee98366764e69c249f1d93813821e17d224cf626cf11fb1801fc4"}, - {file = "pyobjc_framework_quartz-11.0.tar.gz", hash = "sha256:3205bf7795fb9ae34747f701486b3db6dfac71924894d1f372977c4d70c3c619"}, + {file = "pyobjc_framework_quartz-11.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b5ef75c416b0209e25b2eb07a27bd7eedf14a8c6b2f968711969d45ceceb0f84"}, + {file = "pyobjc_framework_quartz-11.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2d501fe95ef15d8acf587cb7dc4ab4be3c5a84e2252017da8dbb7df1bbe7a72a"}, + {file = "pyobjc_framework_quartz-11.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9ac806067541917d6119b98d90390a6944e7d9bd737f5c0a79884202327c9204"}, + {file = "pyobjc_framework_quartz-11.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:43a1138280571bbf44df27a7eef519184b5c4183a588598ebaaeb887b9e73e76"}, + {file = "pyobjc_framework_quartz-11.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b23d81c30c564adf6336e00b357f355b35aad10075dd7e837cfd52a9912863e5"}, + {file = "pyobjc_framework_quartz-11.1-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:07cbda78b4a8fcf3a2d96e047a2ff01f44e3e1820f46f0f4b3b6d77ff6ece07c"}, + {file = "pyobjc_framework_quartz-11.1-cp314-cp314t-macosx_11_0_universal2.whl", hash = "sha256:39d02a3df4b5e3eee1e0da0fb150259476910d2a9aa638ab94153c24317a9561"}, + {file = "pyobjc_framework_quartz-11.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9b1f451ddb5243d8d6316af55f240a02b0fffbfe165bff325628bf73f3df7f44"}, + {file = "pyobjc_framework_quartz-11.1.tar.gz", hash = "sha256:a57f35ccfc22ad48c87c5932818e583777ff7276605fef6afad0ac0741169f75"}, ] [package.dependencies] -pyobjc-core = ">=11.0" -pyobjc-framework-Cocoa = ">=11.0" +pyobjc-core = ">=11.1" +pyobjc-framework-Cocoa = ">=11.1" [[package]] name = "pyparsing" @@ -1761,26 +1891,47 @@ diagrams = ["jinja2", "railroad-diagrams"] [[package]] name = "pytest" -version = "8.3.5" +version = "8.4.1" description = "pytest: simple powerful testing with Python" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" groups = ["main"] files = [ - {file = "pytest-8.3.5-py3-none-any.whl", hash = "sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820"}, - {file = "pytest-8.3.5.tar.gz", hash = "sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845"}, + {file = "pytest-8.4.1-py3-none-any.whl", hash = "sha256:539c70ba6fcead8e78eebbf1115e8b589e7565830d7d006a8723f19ac8a0afb7"}, + {file = "pytest-8.4.1.tar.gz", hash = 
"sha256:7c67fd69174877359ed9371ec3af8a3d2b04741818c51e5e99cc1742251fa93c"}, ] [package.dependencies] -colorama = {version = "*", markers = "sys_platform == \"win32\""} -exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} -iniconfig = "*" -packaging = "*" +colorama = {version = ">=0.4", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1", markers = "python_version < \"3.11\""} +iniconfig = ">=1" +packaging = ">=20" pluggy = ">=1.5,<2" +pygments = ">=2.7.2" tomli = {version = ">=1", markers = "python_version < \"3.11\""} [package.extras] -dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "pytest-cov" +version = "6.2.1" +description = "Pytest plugin for measuring coverage." +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "pytest_cov-6.2.1-py3-none-any.whl", hash = "sha256:f5bc4c23f42f1cdd23c70b1dab1bbaef4fc505ba950d53e0081d0730dd7e86d5"}, + {file = "pytest_cov-6.2.1.tar.gz", hash = "sha256:25cc6cc0a5358204b8108ecedc51a9b57b34cc6b8c967cc2c01a4e00d8a67da2"}, +] + +[package.dependencies] +coverage = {version = ">=7.5", extras = ["toml"]} +pluggy = ">=1.2" +pytest = ">=6.2.5" + +[package.extras] +testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"] [[package]] name = "python-dateutil" @@ -1878,14 +2029,14 @@ files = [ [[package]] name = "pyyaml-env-tag" -version = "0.1" -description = "A custom YAML tag for referencing environment variables in YAML files. " +version = "1.1" +description = "A custom YAML tag for referencing environment variables in YAML files." optional = false -python-versions = ">=3.6" +python-versions = ">=3.9" groups = ["main"] files = [ - {file = "pyyaml_env_tag-0.1-py3-none-any.whl", hash = "sha256:af31106dec8a4d68c60207c1886031cbf839b68aa7abccdb19868200532c2069"}, - {file = "pyyaml_env_tag-0.1.tar.gz", hash = "sha256:70092675bda14fdec33b31ba77e7543de9ddc88f2e5b99160396572d11525bdb"}, + {file = "pyyaml_env_tag-1.1-py3-none-any.whl", hash = "sha256:17109e1a528561e32f026364712fee1264bc2ea6715120891174ed1b980d2e04"}, + {file = "pyyaml_env_tag-1.1.tar.gz", hash = "sha256:2eb38b75a2d21ee0475d6d97ec19c63287a7e140231e4214969d0eac923cd7ff"}, ] [package.dependencies] @@ -1893,19 +2044,19 @@ pyyaml = "*" [[package]] name = "requests" -version = "2.32.3" +version = "2.32.4" description = "Python HTTP for Humans." 
optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, - {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, + {file = "requests-2.32.4-py3-none-any.whl", hash = "sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c"}, + {file = "requests-2.32.4.tar.gz", hash = "sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422"}, ] [package.dependencies] certifi = ">=2017.4.17" -charset-normalizer = ">=2,<4" +charset_normalizer = ">=2,<4" idna = ">=2.5,<4" urllib3 = ">=1.21.1,<3" @@ -1915,58 +2066,58 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "scipy" -version = "1.15.2" +version = "1.15.3" description = "Fundamental algorithms for scientific computing in Python" optional = false python-versions = ">=3.10" groups = ["main"] files = [ - {file = "scipy-1.15.2-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:a2ec871edaa863e8213ea5df811cd600734f6400b4af272e1c011e69401218e9"}, - {file = "scipy-1.15.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:6f223753c6ea76983af380787611ae1291e3ceb23917393079dcc746ba60cfb5"}, - {file = "scipy-1.15.2-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:ecf797d2d798cf7c838c6d98321061eb3e72a74710e6c40540f0e8087e3b499e"}, - {file = "scipy-1.15.2-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:9b18aa747da280664642997e65aab1dd19d0c3d17068a04b3fe34e2559196cb9"}, - {file = "scipy-1.15.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87994da02e73549dfecaed9e09a4f9d58a045a053865679aeb8d6d43747d4df3"}, - {file = "scipy-1.15.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:69ea6e56d00977f355c0f84eba69877b6df084516c602d93a33812aa04d90a3d"}, - {file = "scipy-1.15.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:888307125ea0c4466287191e5606a2c910963405ce9671448ff9c81c53f85f58"}, - {file = "scipy-1.15.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:9412f5e408b397ff5641080ed1e798623dbe1ec0d78e72c9eca8992976fa65aa"}, - {file = "scipy-1.15.2-cp310-cp310-win_amd64.whl", hash = "sha256:b5e025e903b4f166ea03b109bb241355b9c42c279ea694d8864d033727205e65"}, - {file = "scipy-1.15.2-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:92233b2df6938147be6fa8824b8136f29a18f016ecde986666be5f4d686a91a4"}, - {file = "scipy-1.15.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:62ca1ff3eb513e09ed17a5736929429189adf16d2d740f44e53270cc800ecff1"}, - {file = "scipy-1.15.2-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:4c6676490ad76d1c2894d77f976144b41bd1a4052107902238047fb6a473e971"}, - {file = "scipy-1.15.2-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:a8bf5cb4a25046ac61d38f8d3c3426ec11ebc350246a4642f2f315fe95bda655"}, - {file = "scipy-1.15.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a8e34cf4c188b6dd004654f88586d78f95639e48a25dfae9c5e34a6dc34547e"}, - {file = "scipy-1.15.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28a0d2c2075946346e4408b211240764759e0fabaeb08d871639b5f3b1aca8a0"}, - {file = "scipy-1.15.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:42dabaaa798e987c425ed76062794e93a243be8f0f20fff6e7a89f4d61cb3d40"}, - {file = "scipy-1.15.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6f5e296ec63c5da6ba6fa0343ea73fd51b8b3e1a300b0a8cae3ed4b1122c7462"}, - 
{file = "scipy-1.15.2-cp311-cp311-win_amd64.whl", hash = "sha256:597a0c7008b21c035831c39927406c6181bcf8f60a73f36219b69d010aa04737"}, - {file = "scipy-1.15.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c4697a10da8f8765bb7c83e24a470da5797e37041edfd77fd95ba3811a47c4fd"}, - {file = "scipy-1.15.2-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:869269b767d5ee7ea6991ed7e22b3ca1f22de73ab9a49c44bad338b725603301"}, - {file = "scipy-1.15.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:bad78d580270a4d32470563ea86c6590b465cb98f83d760ff5b0990cb5518a93"}, - {file = "scipy-1.15.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:b09ae80010f52efddb15551025f9016c910296cf70adbf03ce2a8704f3a5ad20"}, - {file = "scipy-1.15.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a6fd6eac1ce74a9f77a7fc724080d507c5812d61e72bd5e4c489b042455865e"}, - {file = "scipy-1.15.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2b871df1fe1a3ba85d90e22742b93584f8d2b8e6124f8372ab15c71b73e428b8"}, - {file = "scipy-1.15.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:03205d57a28e18dfd39f0377d5002725bf1f19a46f444108c29bdb246b6c8a11"}, - {file = "scipy-1.15.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:601881dfb761311045b03114c5fe718a12634e5608c3b403737ae463c9885d53"}, - {file = "scipy-1.15.2-cp312-cp312-win_amd64.whl", hash = "sha256:e7c68b6a43259ba0aab737237876e5c2c549a031ddb7abc28c7b47f22e202ded"}, - {file = "scipy-1.15.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:01edfac9f0798ad6b46d9c4c9ca0e0ad23dbf0b1eb70e96adb9fa7f525eff0bf"}, - {file = "scipy-1.15.2-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:08b57a9336b8e79b305a143c3655cc5bdbe6d5ece3378578888d2afbb51c4e37"}, - {file = "scipy-1.15.2-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:54c462098484e7466362a9f1672d20888f724911a74c22ae35b61f9c5919183d"}, - {file = "scipy-1.15.2-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:cf72ff559a53a6a6d77bd8eefd12a17995ffa44ad86c77a5df96f533d4e6c6bb"}, - {file = "scipy-1.15.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9de9d1416b3d9e7df9923ab23cd2fe714244af10b763975bea9e4f2e81cebd27"}, - {file = "scipy-1.15.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb530e4794fc8ea76a4a21ccb67dea33e5e0e60f07fc38a49e821e1eae3b71a0"}, - {file = "scipy-1.15.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5ea7ed46d437fc52350b028b1d44e002646e28f3e8ddc714011aaf87330f2f32"}, - {file = "scipy-1.15.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:11e7ad32cf184b74380f43d3c0a706f49358b904fa7d5345f16ddf993609184d"}, - {file = "scipy-1.15.2-cp313-cp313-win_amd64.whl", hash = "sha256:a5080a79dfb9b78b768cebf3c9dcbc7b665c5875793569f48bf0e2b1d7f68f6f"}, - {file = "scipy-1.15.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:447ce30cee6a9d5d1379087c9e474628dab3db4a67484be1b7dc3196bfb2fac9"}, - {file = "scipy-1.15.2-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:c90ebe8aaa4397eaefa8455a8182b164a6cc1d59ad53f79943f266d99f68687f"}, - {file = "scipy-1.15.2-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:def751dd08243934c884a3221156d63e15234a3155cf25978b0a668409d45eb6"}, - {file = "scipy-1.15.2-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:302093e7dfb120e55515936cb55618ee0b895f8bcaf18ff81eca086c17bd80af"}, - {file = "scipy-1.15.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:7cd5b77413e1855351cdde594eca99c1f4a588c2d63711388b6a1f1c01f62274"}, - {file = "scipy-1.15.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d0194c37037707b2afa7a2f2a924cf7bac3dc292d51b6a925e5fcb89bc5c776"}, - {file = "scipy-1.15.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:bae43364d600fdc3ac327db99659dcb79e6e7ecd279a75fe1266669d9a652828"}, - {file = "scipy-1.15.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f031846580d9acccd0044efd1a90e6f4df3a6e12b4b6bd694a7bc03a89892b28"}, - {file = "scipy-1.15.2-cp313-cp313t-win_amd64.whl", hash = "sha256:fe8a9eb875d430d81755472c5ba75e84acc980e4a8f6204d402849234d3017db"}, - {file = "scipy-1.15.2.tar.gz", hash = "sha256:cd58a314d92838f7e6f755c8a2167ead4f27e1fd5c1251fd54289569ef3495ec"}, + {file = "scipy-1.15.3-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:a345928c86d535060c9c2b25e71e87c39ab2f22fc96e9636bd74d1dbf9de448c"}, + {file = "scipy-1.15.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:ad3432cb0f9ed87477a8d97f03b763fd1d57709f1bbde3c9369b1dff5503b253"}, + {file = "scipy-1.15.3-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:aef683a9ae6eb00728a542b796f52a5477b78252edede72b8327a886ab63293f"}, + {file = "scipy-1.15.3-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:1c832e1bd78dea67d5c16f786681b28dd695a8cb1fb90af2e27580d3d0967e92"}, + {file = "scipy-1.15.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:263961f658ce2165bbd7b99fa5135195c3a12d9bef045345016b8b50c315cb82"}, + {file = "scipy-1.15.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e2abc762b0811e09a0d3258abee2d98e0c703eee49464ce0069590846f31d40"}, + {file = "scipy-1.15.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ed7284b21a7a0c8f1b6e5977ac05396c0d008b89e05498c8b7e8f4a1423bba0e"}, + {file = "scipy-1.15.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:5380741e53df2c566f4d234b100a484b420af85deb39ea35a1cc1be84ff53a5c"}, + {file = "scipy-1.15.3-cp310-cp310-win_amd64.whl", hash = "sha256:9d61e97b186a57350f6d6fd72640f9e99d5a4a2b8fbf4b9ee9a841eab327dc13"}, + {file = "scipy-1.15.3-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:993439ce220d25e3696d1b23b233dd010169b62f6456488567e830654ee37a6b"}, + {file = "scipy-1.15.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:34716e281f181a02341ddeaad584205bd2fd3c242063bd3423d61ac259ca7eba"}, + {file = "scipy-1.15.3-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:3b0334816afb8b91dab859281b1b9786934392aa3d527cd847e41bb6f45bee65"}, + {file = "scipy-1.15.3-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:6db907c7368e3092e24919b5e31c76998b0ce1684d51a90943cb0ed1b4ffd6c1"}, + {file = "scipy-1.15.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:721d6b4ef5dc82ca8968c25b111e307083d7ca9091bc38163fb89243e85e3889"}, + {file = "scipy-1.15.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39cb9c62e471b1bb3750066ecc3a3f3052b37751c7c3dfd0fd7e48900ed52982"}, + {file = "scipy-1.15.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:795c46999bae845966368a3c013e0e00947932d68e235702b5c3f6ea799aa8c9"}, + {file = "scipy-1.15.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:18aaacb735ab38b38db42cb01f6b92a2d0d4b6aabefeb07f02849e47f8fb3594"}, + {file = "scipy-1.15.3-cp311-cp311-win_amd64.whl", hash = "sha256:ae48a786a28412d744c62fd7816a4118ef97e5be0bee968ce8f0a2fba7acf3bb"}, + {file = "scipy-1.15.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = 
"sha256:6ac6310fdbfb7aa6612408bd2f07295bcbd3fda00d2d702178434751fe48e019"}, + {file = "scipy-1.15.3-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:185cd3d6d05ca4b44a8f1595af87f9c372bb6acf9c808e99aa3e9aa03bd98cf6"}, + {file = "scipy-1.15.3-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:05dc6abcd105e1a29f95eada46d4a3f251743cfd7d3ae8ddb4088047f24ea477"}, + {file = "scipy-1.15.3-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:06efcba926324df1696931a57a176c80848ccd67ce6ad020c810736bfd58eb1c"}, + {file = "scipy-1.15.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c05045d8b9bfd807ee1b9f38761993297b10b245f012b11b13b91ba8945f7e45"}, + {file = "scipy-1.15.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:271e3713e645149ea5ea3e97b57fdab61ce61333f97cfae392c28ba786f9bb49"}, + {file = "scipy-1.15.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6cfd56fc1a8e53f6e89ba3a7a7251f7396412d655bca2aa5611c8ec9a6784a1e"}, + {file = "scipy-1.15.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0ff17c0bb1cb32952c09217d8d1eed9b53d1463e5f1dd6052c7857f83127d539"}, + {file = "scipy-1.15.3-cp312-cp312-win_amd64.whl", hash = "sha256:52092bc0472cfd17df49ff17e70624345efece4e1a12b23783a1ac59a1b728ed"}, + {file = "scipy-1.15.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2c620736bcc334782e24d173c0fdbb7590a0a436d2fdf39310a8902505008759"}, + {file = "scipy-1.15.3-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:7e11270a000969409d37ed399585ee530b9ef6aa99d50c019de4cb01e8e54e62"}, + {file = "scipy-1.15.3-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:8c9ed3ba2c8a2ce098163a9bdb26f891746d02136995df25227a20e71c396ebb"}, + {file = "scipy-1.15.3-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:0bdd905264c0c9cfa74a4772cdb2070171790381a5c4d312c973382fc6eaf730"}, + {file = "scipy-1.15.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79167bba085c31f38603e11a267d862957cbb3ce018d8b38f79ac043bc92d825"}, + {file = "scipy-1.15.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9deabd6d547aee2c9a81dee6cc96c6d7e9a9b1953f74850c179f91fdc729cb7"}, + {file = "scipy-1.15.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:dde4fc32993071ac0c7dd2d82569e544f0bdaff66269cb475e0f369adad13f11"}, + {file = "scipy-1.15.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f77f853d584e72e874d87357ad70f44b437331507d1c311457bed8ed2b956126"}, + {file = "scipy-1.15.3-cp313-cp313-win_amd64.whl", hash = "sha256:b90ab29d0c37ec9bf55424c064312930ca5f4bde15ee8619ee44e69319aab163"}, + {file = "scipy-1.15.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3ac07623267feb3ae308487c260ac684b32ea35fd81e12845039952f558047b8"}, + {file = "scipy-1.15.3-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:6487aa99c2a3d509a5227d9a5e889ff05830a06b2ce08ec30df6d79db5fcd5c5"}, + {file = "scipy-1.15.3-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:50f9e62461c95d933d5c5ef4a1f2ebf9a2b4e83b0db374cb3f1de104d935922e"}, + {file = "scipy-1.15.3-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:14ed70039d182f411ffc74789a16df3835e05dc469b898233a245cdfd7f162cb"}, + {file = "scipy-1.15.3-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a769105537aa07a69468a0eefcd121be52006db61cdd8cac8a0e68980bbb723"}, + {file = "scipy-1.15.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9db984639887e3dffb3928d118145ffe40eff2fa40cb241a306ec57c219ebbbb"}, + {file = 
"scipy-1.15.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:40e54d5c7e7ebf1aa596c374c49fa3135f04648a0caabcb66c52884b943f02b4"}, + {file = "scipy-1.15.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:5e721fed53187e71d0ccf382b6bf977644c533e506c4d33c3fb24de89f5c3ed5"}, + {file = "scipy-1.15.3-cp313-cp313t-win_amd64.whl", hash = "sha256:76ad1fb5f8752eabf0fa02e4cc0336b4e8f021e2d5f061ed37d6d264db35e3ca"}, + {file = "scipy-1.15.3.tar.gz", hash = "sha256:eae3cf522bc7df64b42cad3925c876e1b0b6c35c1337c93e12c0f366f55b0eaf"}, ] [package.dependencies] @@ -1974,19 +2125,19 @@ numpy = ">=1.23.5,<2.5" [package.extras] dev = ["cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy (==1.10.0)", "pycodestyle", "pydevtool", "rich-click", "ruff (>=0.0.292)", "types-psutil", "typing_extensions"] -doc = ["intersphinx_registry", "jupyterlite-pyodide-kernel", "jupyterlite-sphinx (>=0.16.5)", "jupytext", "matplotlib (>=3.5)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (>=0.15.2)", "sphinx (>=5.0.0,<8.0.0)", "sphinx-copybutton", "sphinx-design (>=0.4.0)"] +doc = ["intersphinx_registry", "jupyterlite-pyodide-kernel", "jupyterlite-sphinx (>=0.19.1)", "jupytext", "matplotlib (>=3.5)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (>=0.15.2)", "sphinx (>=5.0.0,<8.0.0)", "sphinx-copybutton", "sphinx-design (>=0.4.0)"] test = ["Cython", "array-api-strict (>=2.0,<2.1.1)", "asv", "gmpy2", "hypothesis (>=6.30)", "meson", "mpmath", "ninja", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] [[package]] name = "setuptools" -version = "80.3.1" +version = "80.9.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "setuptools-80.3.1-py3-none-any.whl", hash = "sha256:ea8e00d7992054c4c592aeb892f6ad51fe1b4d90cc6947cc45c45717c40ec537"}, - {file = "setuptools-80.3.1.tar.gz", hash = "sha256:31e2c58dbb67c99c289f51c16d899afedae292b978f8051efaf6262d8212f927"}, + {file = "setuptools-80.9.0-py3-none-any.whl", hash = "sha256:062d34222ad13e0cc312a4c02d73f059e86a4acbfbdea8f8f76b28c99f306922"}, + {file = "setuptools-80.9.0.tar.gz", hash = "sha256:f36b47402ecde768dbfafc46e8e4207b4360c654f1f3bb84475f0a28628fb19c"}, ] [package.extras] @@ -2000,53 +2151,53 @@ type = ["importlib_metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (==1.14 [[package]] name = "shapely" -version = "2.1.0" +version = "2.1.1" description = "Manipulation and analysis of geometric objects" optional = false python-versions = ">=3.10" groups = ["main"] files = [ - {file = "shapely-2.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d3e5c5e3864d4dc431dd85a8e5137ebd39c8ac287b009d3fa80a07017b29c940"}, - {file = "shapely-2.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d6eea89b16f5f3a064659126455d23fa3066bc3d6cd385c35214f06bf5871aa6"}, - {file = "shapely-2.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:183174ad0b21a81ee661f05e7c47aa92ebfae01814cd3cbe54adea7a4213f5f4"}, - {file = "shapely-2.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f239c1484af66bc14b81a76f2a8e0fada29d59010423253ff857d0ccefdaa93f"}, - {file = "shapely-2.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6220a466d1475141dad0cd8065d2549a5c2ed3fa4e2e02fb8ea65d494cfd5b07"}, - {file = "shapely-2.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:4822d3ed3efb06145c34d29d5b56792f72b7d713300f603bfd5d825892c6f79f"}, - {file = 
"shapely-2.1.0-cp310-cp310-win32.whl", hash = "sha256:ea51ddf3d3c60866dca746081b56c75f34ff1b01acbd4d44269071a673c735b9"}, - {file = "shapely-2.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:a6f5e02e2cded9f4ec5709900a296c7f2cce5f8e9e9d80ba7d89ae2f4ed89d7b"}, - {file = "shapely-2.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c8323031ef7c1bdda7a92d5ddbc7b6b62702e73ba37e9a8ccc8da99ec2c0b87c"}, - {file = "shapely-2.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4da7c6cd748d86ec6aace99ad17129d30954ccf5e73e9911cdb5f0fa9658b4f8"}, - {file = "shapely-2.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f0cdf85ff80831137067e7a237085a3ee72c225dba1b30beef87f7d396cf02b"}, - {file = "shapely-2.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41f2be5d79aac39886f23000727cf02001aef3af8810176c29ee12cdc3ef3a50"}, - {file = "shapely-2.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:21a4515009f56d7a159cf5c2554264e82f56405b4721f9a422cb397237c5dca8"}, - {file = "shapely-2.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:15cebc323cec2cb6b2eaa310fdfc621f6dbbfaf6bde336d13838fcea76c885a9"}, - {file = "shapely-2.1.0-cp311-cp311-win32.whl", hash = "sha256:cad51b7a5c8f82f5640472944a74f0f239123dde9a63042b3c5ea311739b7d20"}, - {file = "shapely-2.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:d4005309dde8658e287ad9c435c81877f6a95a9419b932fa7a1f34b120f270ae"}, - {file = "shapely-2.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:53e7ee8bd8609cf12ee6dce01ea5affe676976cf7049315751d53d8db6d2b4b2"}, - {file = "shapely-2.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3cab20b665d26dbec0b380e15749bea720885a481fa7b1eedc88195d4a98cfa4"}, - {file = "shapely-2.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4a38b39a09340273c3c92b3b9a374272a12cc7e468aeeea22c1c46217a03e5c"}, - {file = "shapely-2.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:edaec656bdd9b71278b98e6f77c464b1c3b2daa9eace78012ff0f0b4b5b15b04"}, - {file = "shapely-2.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:c8a732ddd9b25e7a54aa748e7df8fd704e23e5d5d35b7d376d80bffbfc376d04"}, - {file = "shapely-2.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9c93693ad8adfdc9138a5a2d42da02da94f728dd2e82d2f0f442f10e25027f5f"}, - {file = "shapely-2.1.0-cp312-cp312-win32.whl", hash = "sha256:d8ac6604eefe807e71a908524de23a37920133a1729fe3a4dfe0ed82c044cbf4"}, - {file = "shapely-2.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:f4f47e631aa4f9ec5576eac546eb3f38802e2f82aeb0552f9612cb9a14ece1db"}, - {file = "shapely-2.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b64423295b563f43a043eb786e7a03200ebe68698e36d2b4b1c39f31dfb50dfb"}, - {file = "shapely-2.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1b5578f45adc25b235b22d1ccb9a0348c8dc36f31983e57ea129a88f96f7b870"}, - {file = "shapely-2.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1a7e83d383b27f02b684e50ab7f34e511c92e33b6ca164a6a9065705dd64bcb"}, - {file = "shapely-2.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:942031eb4d8f7b3b22f43ba42c09c7aa3d843aa10d5cc1619fe816e923b66e55"}, - {file = "shapely-2.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d2843c456a2e5627ee6271800f07277c0d2652fb287bf66464571a057dbc00b3"}, - {file = "shapely-2.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:8c4b17469b7f39a5e6a7cfea79f38ae08a275427f41fe8b48c372e1449147908"}, - {file = "shapely-2.1.0-cp313-cp313-win32.whl", hash = "sha256:30e967abd08fce49513d4187c01b19f139084019f33bec0673e8dbeb557c45e4"}, - {file = "shapely-2.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:1dc8d4364483a14aba4c844b7bd16a6fa3728887e2c33dfa1afa34a3cf4d08a5"}, - {file = "shapely-2.1.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:673e073fea099d1c82f666fb7ab0a00a77eff2999130a69357ce11941260d855"}, - {file = "shapely-2.1.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:6d1513f915a56de67659fe2047c1ad5ff0f8cbff3519d1e74fced69c9cb0e7da"}, - {file = "shapely-2.1.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d6a7043178890b9e028d80496ff4c79dc7629bff4d78a2f25323b661756bab8"}, - {file = "shapely-2.1.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cb638378dc3d76f7e85b67d7e2bb1366811912430ac9247ac00c127c2b444cdc"}, - {file = "shapely-2.1.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:737124e87d91d616acf9a911f74ac55e05db02a43a6a7245b3d663817b876055"}, - {file = "shapely-2.1.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8e6c229e7bb87aae5df82fa00b6718987a43ec168cc5affe095cca59d233f314"}, - {file = "shapely-2.1.0-cp313-cp313t-win32.whl", hash = "sha256:a9580bda119b1f42f955aa8e52382d5c73f7957e0203bc0c0c60084846f3db94"}, - {file = "shapely-2.1.0-cp313-cp313t-win_amd64.whl", hash = "sha256:e8ff4e5cfd799ba5b6f37b5d5527dbd85b4a47c65b6d459a03d0962d2a9d4d10"}, - {file = "shapely-2.1.0.tar.gz", hash = "sha256:2cbe90e86fa8fc3ca8af6ffb00a77b246b918c7cf28677b7c21489b678f6b02e"}, + {file = "shapely-2.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d8ccc872a632acb7bdcb69e5e78df27213f7efd195882668ffba5405497337c6"}, + {file = "shapely-2.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f24f2ecda1e6c091da64bcbef8dd121380948074875bd1b247b3d17e99407099"}, + {file = "shapely-2.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45112a5be0b745b49e50f8829ce490eb67fefb0cea8d4f8ac5764bfedaa83d2d"}, + {file = "shapely-2.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c10ce6f11904d65e9bbb3e41e774903c944e20b3f0b282559885302f52f224a"}, + {file = "shapely-2.1.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:61168010dfe4e45f956ffbbaf080c88afce199ea81eb1f0ac43230065df320bd"}, + {file = "shapely-2.1.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cacf067cdff741cd5c56a21c52f54ece4e4dad9d311130493a791997da4a886b"}, + {file = "shapely-2.1.1-cp310-cp310-win32.whl", hash = "sha256:23b8772c3b815e7790fb2eab75a0b3951f435bc0fce7bb146cb064f17d35ab4f"}, + {file = "shapely-2.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:2c7b2b6143abf4fa77851cef8ef690e03feade9a0d48acd6dc41d9e0e78d7ca6"}, + {file = "shapely-2.1.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:587a1aa72bc858fab9b8c20427b5f6027b7cbc92743b8e2c73b9de55aa71c7a7"}, + {file = "shapely-2.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9fa5c53b0791a4b998f9ad84aad456c988600757a96b0a05e14bba10cebaaaea"}, + {file = "shapely-2.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aabecd038841ab5310d23495253f01c2a82a3aedae5ab9ca489be214aa458aa7"}, + {file = "shapely-2.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:586f6aee1edec04e16227517a866df3e9a2e43c1f635efc32978bb3dc9c63753"}, + {file = "shapely-2.1.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = 
"sha256:b9878b9e37ad26c72aada8de0c9cfe418d9e2ff36992a1693b7f65a075b28647"}, + {file = "shapely-2.1.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d9a531c48f289ba355e37b134e98e28c557ff13965d4653a5228d0f42a09aed0"}, + {file = "shapely-2.1.1-cp311-cp311-win32.whl", hash = "sha256:4866de2673a971820c75c0167b1f1cd8fb76f2d641101c23d3ca021ad0449bab"}, + {file = "shapely-2.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:20a9d79958b3d6c70d8a886b250047ea32ff40489d7abb47d01498c704557a93"}, + {file = "shapely-2.1.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2827365b58bf98efb60affc94a8e01c56dd1995a80aabe4b701465d86dcbba43"}, + {file = "shapely-2.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a9c551f7fa7f1e917af2347fe983f21f212863f1d04f08eece01e9c275903fad"}, + {file = "shapely-2.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78dec4d4fbe7b1db8dc36de3031767e7ece5911fb7782bc9e95c5cdec58fb1e9"}, + {file = "shapely-2.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:872d3c0a7b8b37da0e23d80496ec5973c4692920b90de9f502b5beb994bbaaef"}, + {file = "shapely-2.1.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2e2b9125ebfbc28ecf5353511de62f75a8515ae9470521c9a693e4bb9fbe0cf1"}, + {file = "shapely-2.1.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:4b96cea171b3d7f6786976a0520f178c42792897653ecca0c5422fb1e6946e6d"}, + {file = "shapely-2.1.1-cp312-cp312-win32.whl", hash = "sha256:39dca52201e02996df02e447f729da97cfb6ff41a03cb50f5547f19d02905af8"}, + {file = "shapely-2.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:13d643256f81d55a50013eff6321142781cf777eb6a9e207c2c9e6315ba6044a"}, + {file = "shapely-2.1.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:3004a644d9e89e26c20286d5fdc10f41b1744c48ce910bd1867fdff963fe6c48"}, + {file = "shapely-2.1.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1415146fa12d80a47d13cfad5310b3c8b9c2aa8c14a0c845c9d3d75e77cb54f6"}, + {file = "shapely-2.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21fcab88b7520820ec16d09d6bea68652ca13993c84dffc6129dc3607c95594c"}, + {file = "shapely-2.1.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e5ce6a5cc52c974b291237a96c08c5592e50f066871704fb5b12be2639d9026a"}, + {file = "shapely-2.1.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:04e4c12a45a1d70aeb266618d8cf81a2de9c4df511b63e105b90bfdfb52146de"}, + {file = "shapely-2.1.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6ca74d851ca5264aae16c2b47e96735579686cb69fa93c4078070a0ec845b8d8"}, + {file = "shapely-2.1.1-cp313-cp313-win32.whl", hash = "sha256:fd9130501bf42ffb7e0695b9ea17a27ae8ce68d50b56b6941c7f9b3d3453bc52"}, + {file = "shapely-2.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:ab8d878687b438a2f4c138ed1a80941c6ab0029e0f4c785ecfe114413b498a97"}, + {file = "shapely-2.1.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0c062384316a47f776305ed2fa22182717508ffdeb4a56d0ff4087a77b2a0f6d"}, + {file = "shapely-2.1.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:4ecf6c196b896e8f1360cc219ed4eee1c1e5f5883e505d449f263bd053fb8c05"}, + {file = "shapely-2.1.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb00070b4c4860f6743c600285109c273cca5241e970ad56bb87bef0be1ea3a0"}, + {file = "shapely-2.1.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d14a9afa5fa980fbe7bf63706fdfb8ff588f638f145a1d9dbc18374b5b7de913"}, + {file = 
"shapely-2.1.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b640e390dabde790e3fb947198b466e63223e0a9ccd787da5f07bcb14756c28d"}, + {file = "shapely-2.1.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:69e08bf9697c1b73ec6aa70437db922bafcea7baca131c90c26d59491a9760f9"}, + {file = "shapely-2.1.1-cp313-cp313t-win32.whl", hash = "sha256:ef2d09d5a964cc90c2c18b03566cf918a61c248596998a0301d5b632beadb9db"}, + {file = "shapely-2.1.1-cp313-cp313t-win_amd64.whl", hash = "sha256:8cb8f17c377260452e9d7720eeaf59082c5f8ea48cf104524d953e5d36d4bdb7"}, + {file = "shapely-2.1.1.tar.gz", hash = "sha256:500621967f2ffe9642454808009044c21e5b35db89ce69f8a2042c2ffd0e2772"}, ] [package.dependencies] @@ -2144,7 +2295,7 @@ description = "A lil' TOML parser" optional = false python-versions = ">=3.8" groups = ["main"] -markers = "python_version < \"3.11\"" +markers = "python_full_version <= \"3.11.0a6\"" files = [ {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, @@ -2182,36 +2333,36 @@ files = [ [[package]] name = "torch" -version = "2.7.0" +version = "2.7.1" description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" optional = false python-versions = ">=3.9.0" groups = ["main"] files = [ - {file = "torch-2.7.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:c9afea41b11e1a1ab1b258a5c31afbd646d6319042bfe4f231b408034b51128b"}, - {file = "torch-2.7.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:0b9960183b6e5b71239a3e6c883d8852c304e691c0b2955f7045e8a6d05b9183"}, - {file = "torch-2.7.0-cp310-cp310-win_amd64.whl", hash = "sha256:2ad79d0d8c2a20a37c5df6052ec67c2078a2c4e9a96dd3a8b55daaff6d28ea29"}, - {file = "torch-2.7.0-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:34e0168ed6de99121612d72224e59b2a58a83dae64999990eada7260c5dd582d"}, - {file = "torch-2.7.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:2b7813e904757b125faf1a9a3154e1d50381d539ced34da1992f52440567c156"}, - {file = "torch-2.7.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:fd5cfbb4c3bbadd57ad1b27d56a28008f8d8753733411a140fcfb84d7f933a25"}, - {file = "torch-2.7.0-cp311-cp311-win_amd64.whl", hash = "sha256:58df8d5c2eeb81305760282b5069ea4442791a6bbf0c74d9069b7b3304ff8a37"}, - {file = "torch-2.7.0-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:0a8d43caa342b9986101ec5feb5bbf1d86570b5caa01e9cb426378311258fdde"}, - {file = "torch-2.7.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:36a6368c7ace41ad1c0f69f18056020b6a5ca47bedaca9a2f3b578f5a104c26c"}, - {file = "torch-2.7.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:15aab3e31c16feb12ae0a88dba3434a458874636f360c567caa6a91f6bfba481"}, - {file = "torch-2.7.0-cp312-cp312-win_amd64.whl", hash = "sha256:f56d4b2510934e072bab3ab8987e00e60e1262fb238176168f5e0c43a1320c6d"}, - {file = "torch-2.7.0-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:30b7688a87239a7de83f269333651d8e582afffce6f591fff08c046f7787296e"}, - {file = "torch-2.7.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:868ccdc11798535b5727509480cd1d86d74220cfdc42842c4617338c1109a205"}, - {file = "torch-2.7.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:9b52347118116cf3dff2ab5a3c3dd97c719eb924ac658ca2a7335652076df708"}, - {file = "torch-2.7.0-cp313-cp313-win_amd64.whl", hash = 
"sha256:434cf3b378340efc87c758f250e884f34460624c0523fe5c9b518d205c91dd1b"}, - {file = "torch-2.7.0-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:edad98dddd82220465b106506bb91ee5ce32bd075cddbcf2b443dfaa2cbd83bf"}, - {file = "torch-2.7.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:2a885fc25afefb6e6eb18a7d1e8bfa01cc153e92271d980a49243b250d5ab6d9"}, - {file = "torch-2.7.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:176300ff5bc11a5f5b0784e40bde9e10a35c4ae9609beed96b4aeb46a27f5fae"}, - {file = "torch-2.7.0-cp313-cp313t-win_amd64.whl", hash = "sha256:d0ca446a93f474985d81dc866fcc8dccefb9460a29a456f79d99c29a78a66993"}, - {file = "torch-2.7.0-cp313-none-macosx_11_0_arm64.whl", hash = "sha256:27f5007bdf45f7bb7af7f11d1828d5c2487e030690afb3d89a651fd7036a390e"}, - {file = "torch-2.7.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:e362efaa5b3078e5f75c33efc05005b9b46de0d2e899519d5b4cad0e050ed0f7"}, - {file = "torch-2.7.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:fc1ed9258cbfce69970ff508ea60881818d414d098a800b7695ba36f570d34b0"}, - {file = "torch-2.7.0-cp39-cp39-win_amd64.whl", hash = "sha256:87b0802cab44659fcb6bcf5678d58fa4a8b48561cde8fb2d317edf0b6990e1bb"}, - {file = "torch-2.7.0-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:ccd7509141713997861b7a947ef0a717143cd7e9240addd168f38ba8fd23fd56"}, + {file = "torch-2.7.1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:a103b5d782af5bd119b81dbcc7ffc6fa09904c423ff8db397a1e6ea8fd71508f"}, + {file = "torch-2.7.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:fe955951bdf32d182ee8ead6c3186ad54781492bf03d547d31771a01b3d6fb7d"}, + {file = "torch-2.7.1-cp310-cp310-win_amd64.whl", hash = "sha256:885453d6fba67d9991132143bf7fa06b79b24352f4506fd4d10b309f53454162"}, + {file = "torch-2.7.1-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:d72acfdb86cee2a32c0ce0101606f3758f0d8bb5f8f31e7920dc2809e963aa7c"}, + {file = "torch-2.7.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:236f501f2e383f1cb861337bdf057712182f910f10aeaf509065d54d339e49b2"}, + {file = "torch-2.7.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:06eea61f859436622e78dd0cdd51dbc8f8c6d76917a9cf0555a333f9eac31ec1"}, + {file = "torch-2.7.1-cp311-cp311-win_amd64.whl", hash = "sha256:8273145a2e0a3c6f9fd2ac36762d6ee89c26d430e612b95a99885df083b04e52"}, + {file = "torch-2.7.1-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:aea4fc1bf433d12843eb2c6b2204861f43d8364597697074c8d38ae2507f8730"}, + {file = "torch-2.7.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:27ea1e518df4c9de73af7e8a720770f3628e7f667280bce2be7a16292697e3fa"}, + {file = "torch-2.7.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:c33360cfc2edd976c2633b3b66c769bdcbbf0e0b6550606d188431c81e7dd1fc"}, + {file = "torch-2.7.1-cp312-cp312-win_amd64.whl", hash = "sha256:d8bf6e1856ddd1807e79dc57e54d3335f2b62e6f316ed13ed3ecfe1fc1df3d8b"}, + {file = "torch-2.7.1-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:787687087412c4bd68d315e39bc1223f08aae1d16a9e9771d95eabbb04ae98fb"}, + {file = "torch-2.7.1-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:03563603d931e70722dce0e11999d53aa80a375a3d78e6b39b9f6805ea0a8d28"}, + {file = "torch-2.7.1-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:d632f5417b6980f61404a125b999ca6ebd0b8b4bbdbb5fbbba44374ab619a412"}, + {file = "torch-2.7.1-cp313-cp313-win_amd64.whl", hash = "sha256:23660443e13995ee93e3d844786701ea4ca69f337027b05182f5ba053ce43b38"}, + {file = "torch-2.7.1-cp313-cp313t-macosx_14_0_arm64.whl", hash = 
"sha256:0da4f4dba9f65d0d203794e619fe7ca3247a55ffdcbd17ae8fb83c8b2dc9b585"}, + {file = "torch-2.7.1-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:e08d7e6f21a617fe38eeb46dd2213ded43f27c072e9165dc27300c9ef9570934"}, + {file = "torch-2.7.1-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:30207f672328a42df4f2174b8f426f354b2baa0b7cca3a0adb3d6ab5daf00dc8"}, + {file = "torch-2.7.1-cp313-cp313t-win_amd64.whl", hash = "sha256:79042feca1c634aaf6603fe6feea8c6b30dfa140a6bbc0b973e2260c7e79a22e"}, + {file = "torch-2.7.1-cp313-none-macosx_11_0_arm64.whl", hash = "sha256:988b0cbc4333618a1056d2ebad9eb10089637b659eb645434d0809d8d937b946"}, + {file = "torch-2.7.1-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:e0d81e9a12764b6f3879a866607c8ae93113cbcad57ce01ebde63eb48a576369"}, + {file = "torch-2.7.1-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:8394833c44484547ed4a47162318337b88c97acdb3273d85ea06e03ffff44998"}, + {file = "torch-2.7.1-cp39-cp39-win_amd64.whl", hash = "sha256:df41989d9300e6e3c19ec9f56f856187a6ef060c3662fe54f4b6baf1fc90bd19"}, + {file = "torch-2.7.1-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:a737b5edd1c44a5c1ece2e9f3d00df9d1b3fb9541138bee56d83d38293fb6c9d"}, ] [package.dependencies] @@ -2235,7 +2386,7 @@ nvidia-nvjitlink-cu12 = {version = "12.6.85", markers = "platform_system == \"Li nvidia-nvtx-cu12 = {version = "12.6.77", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} setuptools = {version = "*", markers = "python_version >= \"3.12\""} sympy = ">=1.13.3" -triton = {version = "3.3.0", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +triton = {version = "3.3.1", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} typing-extensions = ">=4.10.0" [package.extras] @@ -2266,19 +2417,19 @@ telegram = ["requests"] [[package]] name = "triton" -version = "3.3.0" +version = "3.3.1" description = "A language and compiler for custom Deep Learning operations" optional = false python-versions = "*" groups = ["main"] markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" files = [ - {file = "triton-3.3.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fad99beafc860501d7fcc1fb7045d9496cbe2c882b1674640304949165a916e7"}, - {file = "triton-3.3.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3161a2bf073d6b22c4e2f33f951f3e5e3001462b2570e6df9cd57565bdec2984"}, - {file = "triton-3.3.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b68c778f6c4218403a6bd01be7484f6dc9e20fe2083d22dd8aef33e3b87a10a3"}, - {file = "triton-3.3.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:47bc87ad66fa4ef17968299acacecaab71ce40a238890acc6ad197c3abe2b8f1"}, - {file = "triton-3.3.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ce4700fc14032af1e049005ae94ba908e71cd6c2df682239aed08e49bc71b742"}, - {file = "triton-3.3.0-cp39-cp39-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1f41403bfa0cbb3e24fd958ca7fee04e9681e55e539296db9aca30c42acae693"}, + {file = "triton-3.3.1-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b74db445b1c562844d3cfad6e9679c72e93fdfb1a90a24052b03bb5c49d1242e"}, + {file = "triton-3.3.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b31e3aa26f8cb3cc5bf4e187bf737cbacf17311e1112b781d4a059353dfd731b"}, + {file = 
"triton-3.3.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9999e83aba21e1a78c1f36f21bce621b77bcaa530277a50484a7cb4a822f6e43"}, + {file = "triton-3.3.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b89d846b5a4198317fec27a5d3a609ea96b6d557ff44b56c23176546023c4240"}, + {file = "triton-3.3.1-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a3198adb9d78b77818a5388bff89fa72ff36f9da0bc689db2f0a651a67ce6a42"}, + {file = "triton-3.3.1-cp39-cp39-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f6139aeb04a146b0b8e0fbbd89ad1e65861c57cfed881f21d62d3cb94a36bab7"}, ] [package.dependencies] @@ -2291,26 +2442,26 @@ tutorials = ["matplotlib", "pandas", "tabulate"] [[package]] name = "typing-extensions" -version = "4.13.2" -description = "Backported and Experimental Type Hints for Python 3.8+" +version = "4.14.0" +description = "Backported and Experimental Type Hints for Python 3.9+" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" groups = ["main"] files = [ - {file = "typing_extensions-4.13.2-py3-none-any.whl", hash = "sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c"}, - {file = "typing_extensions-4.13.2.tar.gz", hash = "sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef"}, + {file = "typing_extensions-4.14.0-py3-none-any.whl", hash = "sha256:a1514509136dd0b477638fc68d6a91497af5076466ad0fa6c338e44e359944af"}, + {file = "typing_extensions-4.14.0.tar.gz", hash = "sha256:8676b788e32f02ab42d9e7c61324048ae4c6d844a399eebace3d4979d75ceef4"}, ] [[package]] name = "urllib3" -version = "2.4.0" +version = "2.5.0" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "urllib3-2.4.0-py3-none-any.whl", hash = "sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813"}, - {file = "urllib3-2.4.0.tar.gz", hash = "sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466"}, + {file = "urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc"}, + {file = "urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760"}, ] [package.extras] diff --git a/pyproject.toml b/pyproject.toml index 3052ad3..c66e62e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,7 +16,7 @@ dependencies = [ "mkdocs (>=1.6.1,<2.0.0)", "mkdocstrings[python] (>=0.29.1,<0.30.0)", "mkdocs-material (>=9.6.11,<10.0.0)", - "scipy (>=1.15.2,<2.0.0)" + "scipy (>=1.15.2,<2.0.0)", ] [tool.pytest.ini_options] diff --git a/robot_nav/models/CNNTD3/att.py b/robot_nav/models/CNNTD3/att.py index ea6eef2..237e22d 100644 --- a/robot_nav/models/CNNTD3/att.py +++ b/robot_nav/models/CNNTD3/att.py @@ -443,7 +443,7 @@ def train( # else: # bce_loss = torch.tensor(0.0, device=masked_weights.device) - bce_weight = 0.1 + bce_weight = 0.0 av_critic_bce_loss.append(bce_loss) critic_entropy_weight = 1 # or tuneable @@ -473,7 +473,7 @@ def train( # else: # bce_loss = torch.tensor(0.0, device=masked_weights.device) - bce_weight = 0.1 + bce_weight = 0.0 av_actor_bce_loss.append(bce_loss) actor_Q, _, _, _, _, _ = self.critic(state, action) diff --git a/robot_nav/multi_train2.py b/robot_nav/multi_train2.py index 63c8e8e..fd7b0c9 100644 --- a/robot_nav/multi_train2.py +++ b/robot_nav/multi_train2.py @@ -57,7 +57,8 @@ def main(args=None): device=device, save_every=save_every, 
load_model=True, - model_name="CNNTD3", + model_name="phase2", + load_model_name="phase1" ) # instantiate a model replay_buffer = get_buffer( @@ -72,7 +73,10 @@ def main(args=None): con = torch.tensor([[0. for _ in range(sim.num_robots-1)] for _ in range(sim.num_robots) ]) poses, distance, cos, sin, collision, goal, a, reward, positions, goal_positions = sim.step([[0, 0] for _ in range(sim.num_robots)], con) # get the initial step state - + running_goals = 0 + running_collisions = 0 + running_timesteps = 0 + iter = 1 while epoch < max_epochs: # train until max_epochs is reached state, terminal = model.prepare_state( poses, distance, cos, sin, collision, goal, a, positions, goal_positions @@ -83,6 +87,9 @@ def main(args=None): a_in = [[(a[0] + 1) / 4, a[1]] for a in action] # clip linear velocity to [0, 0.5] m/s range poses, distance, cos, sin, collision, goal, a, reward, positions, goal_positions = sim.step(a_in, connection, combined_weights) # get data from the environment + running_goals += sum(goal) + running_collisions += sum(collision) + running_timesteps += 1 next_state, terminal = model.prepare_state( poses, distance, cos, sin, collision, goal, a, positions, goal_positions ) # get a next state representation @@ -96,6 +103,12 @@ def main(args=None): poses, distance, cos, sin, collision, goal, a, reward, positions, goal_positions = sim.reset() episode += 1 if episode % train_every_n == 0: + model.writer.add_scalar("run/avg_goal", running_goals/running_timesteps, iter) + model.writer.add_scalar("run/avg_collision", running_collisions / running_timesteps, iter) + running_goals = 0 + running_collisions = 0 + running_timesteps = 0 + iter += 1 model.train( replay_buffer=replay_buffer, iterations=training_iterations, @@ -106,12 +119,12 @@ def main(args=None): else: steps += 1 - if ( - episode + 1 - ) % episodes_per_epoch == 0: # if epoch is concluded, run evaluation - episode = 0 - epoch += 1 - # evaluate(model, epoch, sim, eval_episodes=nr_eval_episodes) + # if ( + # episode + 1 + # ) % episodes_per_epoch == 0: # if epoch is concluded, run evaluation + # episode = 0 + # epoch += 1 + # # evaluate(model, epoch, sim, eval_episodes=nr_eval_episodes) def evaluate(model, epoch, sim, eval_episodes=10): diff --git a/robot_nav/sim2.py b/robot_nav/sim2.py index 1bfcda5..508417f 100644 --- a/robot_nav/sim2.py +++ b/robot_nav/sim2.py @@ -234,21 +234,46 @@ def get_reward(goal, collision, action, closest_robots, distance): (float): Computed reward for the current state. 
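        Example (with the phase-2 shaping enabled below): no goal, no
        collision, nearest robot at 2.0 (outside the 1.5 penalty radius),
        so only the turn penalty -0.5 * |action[1]| applies (r_dist is
        computed but not added back in this phase):

        >>> get_reward(False, False, (0.5, 0.2), [2.0], 5.0)
        -0.1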
""" # if goal: + # return 60.0 + # elif collision: + # return -100.0 + # else: + # cl_pen = 0 + # for rob in closest_robots: + # add = 1.5 - rob if rob < 1.5 else 0 + # cl_pen += add + # return -cl_pen + # r_dist = 1.25/distance + # cl_robot = min(closest_robots) + # cl_pen = 0 - cl_robot if cl_robot < 0 else 0 + # return 2*action[0] - abs(action[1]) - cl_pen + r_dist + + # phase1 + # if goal: # return 100.0 # elif collision: # return -100.0 # else: - # r_dist = 0.75/distance - # cl_robot = min(closest_robots) - # cl_pen = 0 - cl_robot if cl_robot < 0 else 0 - # return 2*action[0] - abs(action[1]) - cl_pen + r_dist + # r_dist = 1.5/distance + # cl_pen = 0 + # for rob in closest_robots: + # add = 1.5 - rob if rob < 1.5 else 0 + # cl_pen += add + # + # return action[0] - 0.5 * abs(action[1])-cl_pen + r_dist + + # phase2 if goal: - return 20.0 + return 80.0 elif collision: return -100.0 else: - r_dist = 0.1/distance - cl_robot = min(closest_robots) - cl_pen = 2.5 - cl_robot if cl_robot < 2.5 else 0 - return 2*action[0] - abs(action[1]) - cl_pen + r_dist + r_dist = 1.5/distance + cl_pen = 0 + for rob in closest_robots: + add = 1.5 - rob if rob < 1.5 else 0 + cl_pen += add + + return -0.5*abs(action[1])-cl_pen + From bc5e0ae3bc00ff46738deeb85e3d24d852dc92f1 Mon Sep 17 00:00:00 2001 From: reinis Date: Sun, 22 Jun 2025 11:21:44 +0200 Subject: [PATCH 03/12] working setup --- robot_nav/models/CNNTD3/att.py | 4 ++-- robot_nav/multi_train2.py | 4 ++-- robot_nav/sim2.py | 34 +++++++++++++++++----------------- 3 files changed, 21 insertions(+), 21 deletions(-) diff --git a/robot_nav/models/CNNTD3/att.py b/robot_nav/models/CNNTD3/att.py index 237e22d..ea6eef2 100644 --- a/robot_nav/models/CNNTD3/att.py +++ b/robot_nav/models/CNNTD3/att.py @@ -443,7 +443,7 @@ def train( # else: # bce_loss = torch.tensor(0.0, device=masked_weights.device) - bce_weight = 0.0 + bce_weight = 0.1 av_critic_bce_loss.append(bce_loss) critic_entropy_weight = 1 # or tuneable @@ -473,7 +473,7 @@ def train( # else: # bce_loss = torch.tensor(0.0, device=masked_weights.device) - bce_weight = 0.0 + bce_weight = 0.1 av_actor_bce_loss.append(bce_loss) actor_Q, _, _, _, _, _ = self.critic(state, action) diff --git a/robot_nav/multi_train2.py b/robot_nav/multi_train2.py index fd7b0c9..63eff71 100644 --- a/robot_nav/multi_train2.py +++ b/robot_nav/multi_train2.py @@ -56,8 +56,8 @@ def main(args=None): num_robots=sim.num_robots, device=device, save_every=save_every, - load_model=True, - model_name="phase2", + load_model=False, + model_name="phase1", load_model_name="phase1" ) # instantiate a model diff --git a/robot_nav/sim2.py b/robot_nav/sim2.py index 508417f..3ad5285 100644 --- a/robot_nav/sim2.py +++ b/robot_nav/sim2.py @@ -249,23 +249,8 @@ def get_reward(goal, collision, action, closest_robots, distance): # return 2*action[0] - abs(action[1]) - cl_pen + r_dist # phase1 - # if goal: - # return 100.0 - # elif collision: - # return -100.0 - # else: - # r_dist = 1.5/distance - # cl_pen = 0 - # for rob in closest_robots: - # add = 1.5 - rob if rob < 1.5 else 0 - # cl_pen += add - # - # return action[0] - 0.5 * abs(action[1])-cl_pen + r_dist - - - # phase2 if goal: - return 80.0 + return 100.0 elif collision: return -100.0 else: @@ -275,5 +260,20 @@ def get_reward(goal, collision, action, closest_robots, distance): add = 1.5 - rob if rob < 1.5 else 0 cl_pen += add - return -0.5*abs(action[1])-cl_pen + return action[0] - 0.5 * abs(action[1])-cl_pen + r_dist + + + # phase2 + # if goal: + # return 80.0 + # elif collision: + # return -100.0 + # 
else: + # r_dist = 1.5/distance + # cl_pen = 0 + # for rob in closest_robots: + # add = 1.5 - rob if rob < 1.5 else 0 + # cl_pen += add + # + # return -0.5*abs(action[1])-cl_pen From 62ded8e38f186268bc58cd5c4d6fdee4c1f12d76 Mon Sep 17 00:00:00 2001 From: reinis Date: Sun, 22 Jun 2025 22:21:01 +0200 Subject: [PATCH 04/12] working setup with k and v --- robot_nav/models/CNNTD3/att.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/robot_nav/models/CNNTD3/att.py b/robot_nav/models/CNNTD3/att.py index ea6eef2..1372e7d 100644 --- a/robot_nav/models/CNNTD3/att.py +++ b/robot_nav/models/CNNTD3/att.py @@ -32,12 +32,12 @@ def __init__(self, embedding_dim): # Soft attention projections self.q = nn.Linear(embedding_dim, embedding_dim, bias=False) - self.k = nn.Linear(embedding_dim, embedding_dim, bias=False) - self.v = nn.Linear(embedding_dim, embedding_dim) + self.k = nn.Linear(7, embedding_dim, bias=False) + self.v = nn.Linear(7, embedding_dim) # Soft attention score network (with distance) self.attn_score_layer = nn.Sequential( - nn.Linear(embedding_dim + 7, embedding_dim), + nn.Linear(embedding_dim *2, embedding_dim), nn.ReLU(), nn.Linear(embedding_dim, 1) ) @@ -132,9 +132,10 @@ def forward(self, embedding): mask[i] = False edge_i_wo_self = edge_features[:, i, mask, :] edge_i_wo_self = edge_i_wo_self.squeeze(1) # (B, N-1, 7) + k = F.leaky_relu(self.k(edge_i_wo_self)) q_i_expanded = q_i.expand(-1, n_agents - 1, -1) # (B, N-1, D) - attention_input = torch.cat([q_i_expanded, edge_i_wo_self], dim=-1) # (B, N-1, D+7) + attention_input = torch.cat([q_i_expanded, k], dim=-1) # (B, N-1, D+7) # Score computation scores = self.attn_score_layer(attention_input).transpose(1, 2) # (B, 1, N-1) @@ -166,7 +167,8 @@ def forward(self, embedding): entropy_list.append(entropy) # Project each other agent's features to embedding dim *before* the attention-weighted sum - v_j = self.v_proj(edge_i_wo_self) # (B, N-1, embedding_dim) + # v_j = self.v_proj(edge_i_wo_self) # (B, N-1, embedding_dim) + v_j = F.leaky_relu(self.v(edge_i_wo_self)) attn_output = torch.bmm(combined_weights, v_j).squeeze(1) # (B, embedding_dim) attention_outputs.append(attn_output) From 9c2f5eb13aae4ac66e988fdec4d7c739a60e4d04 Mon Sep 17 00:00:00 2001 From: reinis Date: Tue, 24 Jun 2025 21:55:48 +0200 Subject: [PATCH 05/12] update added noise --- robot_nav/models/CNNTD3/att.py | 4 +++- robot_nav/multi_train2.py | 6 ++--- robot_nav/sim2.py | 40 +++++++++++++++++++++++----------- 3 files changed, 33 insertions(+), 17 deletions(-) diff --git a/robot_nav/models/CNNTD3/att.py b/robot_nav/models/CNNTD3/att.py index 1372e7d..f4e0614 100644 --- a/robot_nav/models/CNNTD3/att.py +++ b/robot_nav/models/CNNTD3/att.py @@ -346,7 +346,9 @@ def get_action(self, obs, add_noise): """ action, connection, combined_weights = self.act(obs) if add_noise: - action = (action + np.random.normal(0, 0.1, size=action.shape) + noise = np.random.normal(0, 0.4, size=action.shape) + noise = [n/4 if i%2 else n for i, n in enumerate(noise)] + action = (action + noise ).clip(-self.max_action, self.max_action) return action.reshape(-1, 2), connection, combined_weights diff --git a/robot_nav/multi_train2.py b/robot_nav/multi_train2.py index 63eff71..29a4080 100644 --- a/robot_nav/multi_train2.py +++ b/robot_nav/multi_train2.py @@ -56,9 +56,9 @@ def main(args=None): num_robots=sim.num_robots, device=device, save_every=save_every, - load_model=False, - model_name="phase1", - load_model_name="phase1" + load_model=True, + model_name="phase3", + 
load_model_name="phase2" ) # instantiate a model replay_buffer = get_buffer( diff --git a/robot_nav/sim2.py b/robot_nav/sim2.py index 3ad5285..0ca9c6c 100644 --- a/robot_nav/sim2.py +++ b/robot_nav/sim2.py @@ -249,23 +249,23 @@ def get_reward(goal, collision, action, closest_robots, distance): # return 2*action[0] - abs(action[1]) - cl_pen + r_dist # phase1 - if goal: - return 100.0 - elif collision: - return -100.0 - else: - r_dist = 1.5/distance - cl_pen = 0 - for rob in closest_robots: - add = 1.5 - rob if rob < 1.5 else 0 - cl_pen += add - - return action[0] - 0.5 * abs(action[1])-cl_pen + r_dist + # if goal: + # return 100.0 + # elif collision: + # return -100.0 + # else: + # r_dist = 1.5/distance + # cl_pen = 0 + # for rob in closest_robots: + # add = 1.5 - rob if rob < 1.5 else 0 + # cl_pen += add + # + # return action[0] - 0.5 * abs(action[1])-cl_pen + r_dist # phase2 # if goal: - # return 80.0 + # return 100.0 # elif collision: # return -100.0 # else: @@ -277,3 +277,17 @@ def get_reward(goal, collision, action, closest_robots, distance): # # return -0.5*abs(action[1])-cl_pen + # phase3 + if goal: + return 100.0 + elif collision: + return -100.0 * 3 * action[0] + else: + r_dist = 1.5 / distance + cl_pen = 0 + for rob in closest_robots: + add = 1.5 - rob if rob < 1.5 else 0 + cl_pen += add + + return -0.5 * abs(action[1]) - cl_pen + From 7af52b37768f8833d50e94f2432dc119f32078c1 Mon Sep 17 00:00:00 2001 From: reinis Date: Fri, 27 Jun 2025 22:56:03 +0200 Subject: [PATCH 06/12] working phase 2 --- robot_nav/models/CNNTD3/att.py | 21 ++++++++++++--------- robot_nav/multi_train2.py | 6 +++--- robot_nav/sim2.py | 6 +++--- 3 files changed, 18 insertions(+), 15 deletions(-) diff --git a/robot_nav/models/CNNTD3/att.py b/robot_nav/models/CNNTD3/att.py index f4e0614..c2dc52a 100644 --- a/robot_nav/models/CNNTD3/att.py +++ b/robot_nav/models/CNNTD3/att.py @@ -32,8 +32,8 @@ def __init__(self, embedding_dim): # Soft attention projections self.q = nn.Linear(embedding_dim, embedding_dim, bias=False) - self.k = nn.Linear(7, embedding_dim, bias=False) - self.v = nn.Linear(7, embedding_dim) + self.k = nn.Linear(9, embedding_dim, bias=False) + self.v = nn.Linear(9, embedding_dim) # Soft attention score network (with distance) self.attn_score_layer = nn.Sequential( @@ -42,7 +42,6 @@ def __init__(self, embedding_dim): nn.Linear(embedding_dim, 1) ) - self.v_proj = nn.Linear(7, embedding_dim) # Decoder self.decode_1 = nn.Linear(embedding_dim * 2, embedding_dim * 2) nn.init.kaiming_uniform_(self.decode_1.weight, nonlinearity="leaky_relu") @@ -59,10 +58,14 @@ def forward(self, embedding): embedding = embedding.unsqueeze(0) batch_size, n_agents, _ = embedding.shape - embed = embedding[:, :, 4:].reshape(batch_size * n_agents, -1) + embed = embedding[:, :, 4:9].reshape(batch_size * n_agents, -1) position = embedding[:, :, :2].reshape(batch_size, n_agents, 2) heading = embedding[:, :, 2:4].reshape(batch_size, n_agents, 2) # assume (cos(θ), sin(θ)) - action = embedding[:, :, -2:].reshape(batch_size, n_agents, 2) + action = embedding[:, :, 7:9].reshape(batch_size, n_agents, 2) + goal = embedding[:, :, -2:].reshape(batch_size, n_agents, 2) + goal_j = goal.unsqueeze(1).expand(-1, n_agents, -1, -1) # (B, N, N, 2) + pos_i = position.unsqueeze(2) # (B, N, 1, 2) + rel_goal = goal_j - pos_i agent_embed = self.encode_agent_features(embed) agent_embed = agent_embed.view(batch_size, n_agents, self.embedding_dim) @@ -126,11 +129,12 @@ def forward(self, embedding): attention_outputs = [] entropy_list = [] combined_w = [] + 
soft_edge_features = torch.cat([edge_features, rel_goal], dim=-1) for i in range(n_agents): q_i = q[:, i:i + 1, :] # (B, 1, D) mask = torch.ones(n_agents, dtype=torch.bool, device=edge_features.device) mask[i] = False - edge_i_wo_self = edge_features[:, i, mask, :] + edge_i_wo_self = soft_edge_features[:, i, mask, :] edge_i_wo_self = edge_i_wo_self.squeeze(1) # (B, N-1, 7) k = F.leaky_relu(self.k(edge_i_wo_self)) @@ -167,7 +171,6 @@ def forward(self, embedding): entropy_list.append(entropy) # Project each other agent's features to embedding dim *before* the attention-weighted sum - # v_j = self.v_proj(edge_i_wo_self) # (B, N-1, embedding_dim) v_j = F.leaky_relu(self.v(edge_i_wo_self)) attn_output = torch.bmm(combined_weights, v_j).squeeze(1) # (B, embedding_dim) attention_outputs.append(attn_output) @@ -346,7 +349,7 @@ def get_action(self, obs, add_noise): """ action, connection, combined_weights = self.act(obs) if add_noise: - noise = np.random.normal(0, 0.4, size=action.shape) + noise = np.random.normal(0, 0.5, size=action.shape) noise = [n/4 if i%2 else n for i, n in enumerate(noise)] action = (action + noise ).clip(-self.max_action, self.max_action) @@ -609,7 +612,7 @@ def prepare_state(self, poses, distance, cos, sin, collision, goal, action, posi ang_vel = (act[1] + 1) / 2 # Assuming original range [-1, 1] # Final state vector - state = [x, y, heading_cos, heading_sin, distance[i]/17, cos[i], sin[i], lin_vel, ang_vel] + state = [x, y, heading_cos, heading_sin, distance[i]/17, cos[i], sin[i], lin_vel, ang_vel, gx, gy] assert len(state) == self.state_dim, f"State length mismatch: expected {self.state_dim}, got {len(state)}" states.append(state) diff --git a/robot_nav/multi_train2.py b/robot_nav/multi_train2.py index 29a4080..ef23c70 100644 --- a/robot_nav/multi_train2.py +++ b/robot_nav/multi_train2.py @@ -24,7 +24,7 @@ def main(args=None): """Main training function""" action_dim = 2 # number of actions produced by the model max_action = 1 # maximum absolute value of output actions - state_dim = 9 # number of input values in the neural network (vector length of state input) + state_dim = 11 # number of input values in the neural network (vector length of state input) device = torch.device( "cuda" if torch.cuda.is_available() else "cpu" ) # using cuda if it is available, cpu otherwise @@ -57,8 +57,8 @@ def main(args=None): device=device, save_every=save_every, load_model=True, - model_name="phase3", - load_model_name="phase2" + model_name="phase2", + load_model_name="phase1" ) # instantiate a model replay_buffer = get_buffer( diff --git a/robot_nav/sim2.py b/robot_nav/sim2.py index 0ca9c6c..f9d4158 100644 --- a/robot_nav/sim2.py +++ b/robot_nav/sim2.py @@ -252,7 +252,7 @@ def get_reward(goal, collision, action, closest_robots, distance): # if goal: # return 100.0 # elif collision: - # return -100.0 + # return -100.0 * 3 * action[0] # else: # r_dist = 1.5/distance # cl_pen = 0 @@ -279,14 +279,14 @@ def get_reward(goal, collision, action, closest_robots, distance): # phase3 if goal: - return 100.0 + return 70.0 elif collision: return -100.0 * 3 * action[0] else: r_dist = 1.5 / distance cl_pen = 0 for rob in closest_robots: - add = 1.5 - rob if rob < 1.5 else 0 + add = 2.5 - rob if rob < 2.5 else 0 cl_pen += add return -0.5 * abs(action[1]) - cl_pen From 87413543303e67f53a8ba7a4478888474203273a Mon Sep 17 00:00:00 2001 From: reinis Date: Tue, 1 Jul 2025 14:54:22 +0200 Subject: [PATCH 07/12] working phase 2 with low collision rate --- robot_nav/sim2.py | 2 +- 1 file changed, 1 insertion(+), 
1 deletion(-) diff --git a/robot_nav/sim2.py b/robot_nav/sim2.py index f9d4158..fa423da 100644 --- a/robot_nav/sim2.py +++ b/robot_nav/sim2.py @@ -286,7 +286,7 @@ def get_reward(goal, collision, action, closest_robots, distance): r_dist = 1.5 / distance cl_pen = 0 for rob in closest_robots: - add = 2.5 - rob if rob < 2.5 else 0 + add = (3 - rob)**2 if rob < 3 else 0 cl_pen += add return -0.5 * abs(action[1]) - cl_pen From 9a54277b0596d0b8300bfc19576d9b663cdd2ba2 Mon Sep 17 00:00:00 2001 From: reinis Date: Tue, 1 Jul 2025 19:47:04 +0200 Subject: [PATCH 08/12] working phase with polar other goal coordinates --- robot_nav/models/CNNTD3/att.py | 38 ++++++++++++++-------------- robot_nav/multi_train2.py | 4 +-- robot_nav/sim2.py | 46 +++++++++++++++++----------------- 3 files changed, 44 insertions(+), 44 deletions(-) diff --git a/robot_nav/models/CNNTD3/att.py b/robot_nav/models/CNNTD3/att.py index c2dc52a..3080577 100644 --- a/robot_nav/models/CNNTD3/att.py +++ b/robot_nav/models/CNNTD3/att.py @@ -32,8 +32,8 @@ def __init__(self, embedding_dim): # Soft attention projections self.q = nn.Linear(embedding_dim, embedding_dim, bias=False) - self.k = nn.Linear(9, embedding_dim, bias=False) - self.v = nn.Linear(9, embedding_dim) + self.k = nn.Linear(10, embedding_dim, bias=False) + self.v = nn.Linear(10, embedding_dim) # Soft attention score network (with distance) self.attn_score_layer = nn.Sequential( @@ -65,7 +65,7 @@ def forward(self, embedding): goal = embedding[:, :, -2:].reshape(batch_size, n_agents, 2) goal_j = goal.unsqueeze(1).expand(-1, n_agents, -1, -1) # (B, N, N, 2) pos_i = position.unsqueeze(2) # (B, N, 1, 2) - rel_goal = goal_j - pos_i + goal_rel_vec = goal_j - pos_i agent_embed = self.encode_agent_features(embed) agent_embed = agent_embed.view(batch_size, n_agents, self.embedding_dim) @@ -80,7 +80,7 @@ def forward(self, embedding): # Compute relative vectors and distance rel_vec = pos_j - pos_i # (B, N, N, 2) dx, dy = rel_vec[..., 0], rel_vec[..., 1] - rel_dist = torch.linalg.vector_norm(rel_vec, dim=-1, keepdim=True) # (B, N, N, 1) + rel_dist = torch.linalg.vector_norm(rel_vec, dim=-1, keepdim=True)/12 # (B, N, N, 1) # Relative angle in agent i's frame angle = torch.atan2(dy, dx) - torch.atan2(heading_i[..., 1], heading_i[..., 0]) @@ -119,7 +119,7 @@ def forward(self, embedding): hard_weights = F.gumbel_softmax(hard_logits, hard=False, tau=0.5, dim=-1)[..., 1].unsqueeze(2) hard_weights = hard_weights.view(batch_size, n_agents, n_agents - 1) - unnorm_rel_vec = rel_vec * 12 + unnorm_rel_vec = rel_vec unnorm_rel_dist = torch.linalg.vector_norm(unnorm_rel_vec, dim=-1, keepdim=True) unnorm_rel_dist = unnorm_rel_dist[:, mask].reshape(batch_size * n_agents, n_agents - 1, 1) @@ -129,7 +129,19 @@ def forward(self, embedding): attention_outputs = [] entropy_list = [] combined_w = [] - soft_edge_features = torch.cat([edge_features, rel_goal], dim=-1) + + goal_rel_dist = torch.linalg.vector_norm(goal_rel_vec, dim=-1, keepdim=True) + goal_angle_global = torch.atan2(goal_rel_vec[..., 1], goal_rel_vec[..., 0]) + heading_angle = torch.atan2(heading_i[..., 1], heading_i[..., 0]) + goal_rel_angle = goal_angle_global - heading_angle + goal_rel_angle = (goal_rel_angle + np.pi) % (2 * np.pi) - np.pi + goal_rel_angle_cos = torch.cos(goal_rel_angle).unsqueeze(-1) + goal_rel_angle_sin = torch.sin(goal_rel_angle).unsqueeze(-1) + goal_polar = torch.cat([goal_rel_dist, goal_rel_angle_cos, goal_rel_angle_sin], dim=-1) + + + + soft_edge_features = torch.cat([edge_features, goal_polar], dim=-1) for i in 
range(n_agents): q_i = q[:, i:i + 1, :] # (B, 1, D) mask = torch.ones(n_agents, dtype=torch.bool, device=edge_features.device) @@ -591,18 +603,6 @@ def prepare_state(self, poses, distance, cos, sin, collision, goal, action, posi px, py, theta = pose gx, gy = goal_pos - # Global position (keep for boundary awareness) - x = px / 12 - y = py / 12 - - # Relative goal position in local frame - dx = gx - px - dy = gy - py - rel_gx = dx * np.cos(theta) + dy * np.sin(theta) - rel_gy = -dx * np.sin(theta) + dy * np.cos(theta) - rel_gx /= 12 - rel_gy /= 12 - # Heading as cos/sin heading_cos = np.cos(theta) heading_sin = np.sin(theta) @@ -612,7 +612,7 @@ def prepare_state(self, poses, distance, cos, sin, collision, goal, action, posi ang_vel = (act[1] + 1) / 2 # Assuming original range [-1, 1] # Final state vector - state = [x, y, heading_cos, heading_sin, distance[i]/17, cos[i], sin[i], lin_vel, ang_vel, gx, gy] + state = [px, py, heading_cos, heading_sin, distance[i]/17, cos[i], sin[i], lin_vel, ang_vel, gx, gy] assert len(state) == self.state_dim, f"State length mismatch: expected {self.state_dim}, got {len(state)}" states.append(state) diff --git a/robot_nav/multi_train2.py b/robot_nav/multi_train2.py index ef23c70..0e83c4f 100644 --- a/robot_nav/multi_train2.py +++ b/robot_nav/multi_train2.py @@ -56,8 +56,8 @@ def main(args=None): num_robots=sim.num_robots, device=device, save_every=save_every, - load_model=True, - model_name="phase2", + load_model=False, + model_name="phase1", load_model_name="phase1" ) # instantiate a model diff --git a/robot_nav/sim2.py b/robot_nav/sim2.py index fa423da..04bb0f9 100644 --- a/robot_nav/sim2.py +++ b/robot_nav/sim2.py @@ -249,10 +249,25 @@ def get_reward(goal, collision, action, closest_robots, distance): # return 2*action[0] - abs(action[1]) - cl_pen + r_dist # phase1 + if goal: + return 100.0 + elif collision: + return -100.0 * 3 * action[0] + else: + r_dist = 1.5/distance + cl_pen = 0 + for rob in closest_robots: + add = 1.5 - rob if rob < 1.5 else 0 + cl_pen += add + + return action[0] - 0.5 * abs(action[1])-cl_pen + r_dist + + + # phase2 # if goal: # return 100.0 # elif collision: - # return -100.0 * 3 * action[0] + # return -100.0 # else: # r_dist = 1.5/distance # cl_pen = 0 @@ -260,34 +275,19 @@ def get_reward(goal, collision, action, closest_robots, distance): # add = 1.5 - rob if rob < 1.5 else 0 # cl_pen += add # - # return action[0] - 0.5 * abs(action[1])-cl_pen + r_dist - + # return -0.5*abs(action[1])-cl_pen - # phase2 + # phase3 # if goal: - # return 100.0 + # return 70.0 # elif collision: - # return -100.0 + # return -100.0 * 3 * action[0] # else: - # r_dist = 1.5/distance + # r_dist = 1.5 / distance # cl_pen = 0 # for rob in closest_robots: - # add = 1.5 - rob if rob < 1.5 else 0 + # add = (3 - rob)**2 if rob < 3 else 0 # cl_pen += add # - # return -0.5*abs(action[1])-cl_pen - - # phase3 - if goal: - return 70.0 - elif collision: - return -100.0 * 3 * action[0] - else: - r_dist = 1.5 / distance - cl_pen = 0 - for rob in closest_robots: - add = (3 - rob)**2 if rob < 3 else 0 - cl_pen += add - - return -0.5 * abs(action[1]) - cl_pen + # return -0.5 * abs(action[1]) - cl_pen From d290d93c200583e0dd61ccd5995acf1bad4e6e1f Mon Sep 17 00:00:00 2001 From: reinis Date: Wed, 2 Jul 2025 09:08:57 +0200 Subject: [PATCH 09/12] first refactor --- robot_nav/SIM_ENV/__init__.py | 0 robot_nav/{sim2.py => SIM_ENV/marl_sim.py} | 159 +++--- robot_nav/SIM_ENV/sim.py | 143 +++++ robot_nav/SIM_ENV/sim_env.py | 35 ++ robot_nav/models/CNNTD3/CNNTD3.py | 343 ++---------- 
robot_nav/models/CNNTD3/att.py | 621 --------------------- robot_nav/models/MARL/hsAttention.py | 229 ++++++++ robot_nav/models/MARL/marlTD3.py | 510 +++++++++++++++++ robot_nav/multi_robot_world.yaml | 43 +- robot_nav/multi_robot_world2.yaml | 27 - robot_nav/multi_train.py | 145 ----- robot_nav/multi_train2.py | 167 ------ robot_nav/sim.py | 209 ------- robot_nav/test.py | 9 +- robot_nav/test_random.py | 9 +- robot_nav/test_rnn.py | 4 +- robot_nav/train.py | 11 +- robot_nav/train_marl.py | 154 +++++ robot_nav/train_rnn.py | 4 +- tests/test_model.py | 7 +- tests/test_sim.py | 11 +- tests/test_utils.py | 9 +- 22 files changed, 1216 insertions(+), 1633 deletions(-) create mode 100644 robot_nav/SIM_ENV/__init__.py rename robot_nav/{sim2.py => SIM_ENV/marl_sim.py} (66%) create mode 100644 robot_nav/SIM_ENV/sim.py create mode 100644 robot_nav/SIM_ENV/sim_env.py delete mode 100644 robot_nav/models/CNNTD3/att.py create mode 100644 robot_nav/models/MARL/hsAttention.py create mode 100644 robot_nav/models/MARL/marlTD3.py delete mode 100644 robot_nav/multi_robot_world2.yaml delete mode 100644 robot_nav/multi_train.py delete mode 100644 robot_nav/multi_train2.py delete mode 100644 robot_nav/sim.py create mode 100644 robot_nav/train_marl.py diff --git a/robot_nav/SIM_ENV/__init__.py b/robot_nav/SIM_ENV/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/robot_nav/sim2.py b/robot_nav/SIM_ENV/marl_sim.py similarity index 66% rename from robot_nav/sim2.py rename to robot_nav/SIM_ENV/marl_sim.py index 04bb0f9..f8b1961 100644 --- a/robot_nav/sim2.py +++ b/robot_nav/SIM_ENV/marl_sim.py @@ -1,11 +1,12 @@ import irsim import numpy as np import random - import torch +from robot_nav.SIM_ENV.sim_env import SIM_ENV + -class SIM_ENV: +class MARL_SIM(SIM_ENV): """ A simulation environment interface for robot navigation using IRSim. @@ -17,7 +18,7 @@ class SIM_ENV: robot_goal (np.ndarray): The goal position of the robot. """ - def __init__(self, world_file="robot_world.yaml", disable_plotting=False): + def __init__(self, world_file="multi_robot_world.yaml", disable_plotting=False): """ Initialize the simulation environment. @@ -33,7 +34,7 @@ def __init__(self, world_file="robot_world.yaml", disable_plotting=False): self.robot_goal = robot_info.goal self.num_robots = len(self.env.robot_list) - def step(self, action, connection, combined_weights = None): + def step(self, action, connection, combined_weights=None): """ Perform one step in the simulation using the given control commands. 
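A minimal usage sketch of the MARL_SIM interface introduced above, assuming `model` is the attention-based TD3 agent from this series and `max_steps` is a chosen horizon; the 10-tuple layout and the prepare_state()/get_action() signatures follow what is shown elsewhere in this patch series, so treat exact names as assumptions:

    import numpy as np
    from robot_nav.SIM_ENV.marl_sim import MARL_SIM

    sim = MARL_SIM(world_file="multi_robot_world.yaml", disable_plotting=True)
    # reset() and step() both return the same 10-tuple
    (poses, distance, cos, sin, collision, goal,
     action, reward, positions, goal_positions) = sim.reset()

    max_steps = 500  # assumed horizon, not part of this patch
    for _ in range(max_steps):
        state, _ = model.prepare_state(
            poses, distance, cos, sin, collision, goal, action, positions, goal_positions
        )
        # per-robot actions, hard-attention logits, and soft attention weights
        action, connection, combined_weights = model.get_action(np.array(state), add_noise=True)
        (poses, distance, cos, sin, collision, goal,
         action, reward, positions, goal_positions) = sim.step(action, connection, combined_weights)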
@@ -58,12 +59,23 @@ def step(self, action, connection, combined_weights = None): rewards = [] positions = [] goal_positions = [] - robot_states = [[self.env.robot_list[i].state[0], self.env.robot_list[i].state[1]] for i in range(self.num_robots)] + robot_states = [ + [self.env.robot_list[i].state[0], self.env.robot_list[i].state[1]] + for i in range(self.num_robots) + ] for i in range(self.num_robots): robot_state = self.env.robot_list[i].state - closest_robots = [np.linalg.norm([robot_states[j][0] - robot_state[0], robot_states[j][1] - robot_state[1]]) for j in - range(self.num_robots) if j != i] + closest_robots = [ + np.linalg.norm( + [ + robot_states[j][0] - robot_state[0], + robot_states[j][1] - robot_state[1], + ] + ) + for j in range(self.num_robots) + if j != i + ] robot_goal = self.env.robot_list[i].goal goal_vector = [ robot_goal[0].item() - robot_state[0].item(), @@ -75,7 +87,9 @@ def step(self, action, connection, combined_weights = None): cos, sin = self.cossin(pose_vector, goal_vector) collision = self.env.robot_list[i].collision action_i = action[i] - reward = self.get_reward(goal, collision, action_i, closest_robots, distance) + reward = self.get_reward( + goal, collision, action_i, closest_robots, distance + ) position = [robot_state[0].item(), robot_state[1].item()] goal_position = [robot_goal[0].item(), robot_goal[1].item()] @@ -87,14 +101,14 @@ def step(self, action, connection, combined_weights = None): goals.append(goal) rewards.append(reward) positions.append(position) - poses.append([robot_state[0].item(), robot_state[1].item(), robot_state[2].item()]) + poses.append( + [robot_state[0].item(), robot_state[1].item(), robot_state[2].item()] + ) goal_positions.append(goal_position) - # gumbel_sample = torch.nn.functional.gumbel_softmax(connection[i], tau=0.5, dim=-1) - # i_connections = gumbel_sample.tolist() - # i_connections.insert(i, 0) - - i_probs = torch.sigmoid(connection[i]) # connection[i] is logits for "connect" per pair + i_probs = torch.sigmoid( + connection[i] + ) # connection[i] is logits for "connect" per pair # Now we need to insert the self-connection (optional, typically skipped) i_connections = i_probs.tolist() @@ -103,8 +117,6 @@ def step(self, action, connection, combined_weights = None): i_weights = combined_weights[i].tolist() i_weights.insert(i, 0) - - for j in range(self.num_robots): if i_connections[j] > 0.5: if combined_weights is not None: @@ -112,21 +124,38 @@ def step(self, action, connection, combined_weights = None): else: weight = 1 other_robot_state = self.env.robot_list[j].state - other_pos = [other_robot_state[0].item(), other_robot_state[1].item()] + other_pos = [ + other_robot_state[0].item(), + other_robot_state[1].item(), + ] rx = [position[0], other_pos[0]] ry = [position[1], other_pos[1]] - self.env.draw_trajectory(np.array([rx, ry]), refresh=True, linewidth=weight) + self.env.draw_trajectory( + np.array([rx, ry]), refresh=True, linewidth=weight + ) if goal: self.env.robot_list[i].set_random_goal( obstacle_list=self.env.obstacle_list, init=True, - # range_limits=[[self.env.robot_list[i].position[0].item()-3, self.env.robot_list[i].position[1].item()-3, -3.141592653589793], [self.env.robot_list[i].position[0].item()+3, self.env.robot_list[i].position[1].item()+3, 3.141592653589793]], - range_limits=[[1, 1, -3.141592653589793], - [11, 11, 3.141592653589793]], + range_limits=[ + [1, 1, -3.141592653589793], + [11, 11, 3.141592653589793], + ], ) - return poses, distances, coss, sins, collisions, goals, action, rewards, positions, 
goal_positions + return ( + poses, + distances, + coss, + sins, + collisions, + goals, + action, + rewards, + positions, + goal_positions, + ) def reset( self, @@ -156,7 +185,11 @@ def reset( conflict = True while conflict: conflict = False - robot_state = [[random.uniform(3, 9)], [random.uniform(3, 9)], [random.uniform(-3.14, 3.14)]] + robot_state = [ + [random.uniform(3, 9)], + [random.uniform(3, 9)], + [random.uniform(-3.14, 3.14)], + ] pos = [robot_state[0][0], robot_state[1][0]] for loc in init_states: vector = [ @@ -187,9 +220,10 @@ def reset( robot.set_random_goal( obstacle_list=self.env.obstacle_list, init=True, - # range_limits=[[robot.position[0].item()-3, robot.position[1].item()-3, -3.141592653589793], [robot.position[0].item()+3, robot.position[1].item()+3, 3.141592653589793]], - range_limits=[[1, 1, -3.141592653589793], - [11, 11, 3.141592653589793]], + range_limits=[ + [1, 1, -3.141592653589793], + [11, 11, 3.141592653589793], + ], ) else: self.env.robot.set_goal(np.array(robot_goal), init=True) @@ -197,27 +231,24 @@ def reset( self.robot_goal = self.env.robot.goal action = [[0.0, 0.0] for _ in range(self.num_robots)] - con = torch.tensor([[0. for _ in range(self.num_robots-1)] for _ in range(self.num_robots)]) - poses, distance, cos, sin, _, _, action, reward, positions, goal_positions = self.step(action, con) - return poses, distance, cos, sin, [False]*self.num_robots, [False]*self.num_robots, action, reward, positions, goal_positions - - @staticmethod - def cossin(vec1, vec2): - """ - Compute the cosine and sine of the angle between two 2D vectors. - - Args: - vec1 (list): First 2D vector. - vec2 (list): Second 2D vector. - - Returns: - (tuple): (cosine, sine) of the angle between the vectors. - """ - vec1 = vec1 / np.linalg.norm(vec1) - vec2 = vec2 / np.linalg.norm(vec2) - cos = np.dot(vec1, vec2) - sin = vec1[0] * vec2[1] - vec1[1] * vec2[0] - return cos, sin + con = torch.tensor( + [[0.0 for _ in range(self.num_robots - 1)] for _ in range(self.num_robots)] + ) + poses, distance, cos, sin, _, _, action, reward, positions, goal_positions = ( + self.step(action, con) + ) + return ( + poses, + distance, + cos, + sin, + [False] * self.num_robots, + [False] * self.num_robots, + action, + reward, + positions, + goal_positions, + ) @staticmethod def get_reward(goal, collision, action, closest_robots, distance): @@ -233,20 +264,6 @@ def get_reward(goal, collision, action, closest_robots, distance): Returns: (float): Computed reward for the current state. 
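        Example (illustrative numbers for the phase 1 shaping kept below):
        with goal=False, collision=False, action=[0.5, 0.0], distance=3.0
        and closest_robots=[1.0, 2.0], the reward is
        0.5 - 0.5 * |0.0| - (1.5 - 1.0) + 1.5 / 3.0 = 0.5.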
""" - # if goal: - # return 60.0 - # elif collision: - # return -100.0 - # else: - # cl_pen = 0 - # for rob in closest_robots: - # add = 1.5 - rob if rob < 1.5 else 0 - # cl_pen += add - # return -cl_pen - # r_dist = 1.25/distance - # cl_robot = min(closest_robots) - # cl_pen = 0 - cl_robot if cl_robot < 0 else 0 - # return 2*action[0] - abs(action[1]) - cl_pen + r_dist # phase1 if goal: @@ -254,31 +271,16 @@ def get_reward(goal, collision, action, closest_robots, distance): elif collision: return -100.0 * 3 * action[0] else: - r_dist = 1.5/distance + r_dist = 1.5 / distance cl_pen = 0 for rob in closest_robots: add = 1.5 - rob if rob < 1.5 else 0 cl_pen += add - return action[0] - 0.5 * abs(action[1])-cl_pen + r_dist - + return action[0] - 0.5 * abs(action[1]) - cl_pen + r_dist # phase2 # if goal: - # return 100.0 - # elif collision: - # return -100.0 - # else: - # r_dist = 1.5/distance - # cl_pen = 0 - # for rob in closest_robots: - # add = 1.5 - rob if rob < 1.5 else 0 - # cl_pen += add - # - # return -0.5*abs(action[1])-cl_pen - - # phase3 - # if goal: # return 70.0 # elif collision: # return -100.0 * 3 * action[0] @@ -290,4 +292,3 @@ def get_reward(goal, collision, action, closest_robots, distance): # cl_pen += add # # return -0.5 * abs(action[1]) - cl_pen - diff --git a/robot_nav/SIM_ENV/sim.py b/robot_nav/SIM_ENV/sim.py new file mode 100644 index 0000000..c341373 --- /dev/null +++ b/robot_nav/SIM_ENV/sim.py @@ -0,0 +1,143 @@ +import irsim +import numpy as np +import random + +from robot_nav.SIM_ENV.sim_env import SIM_ENV + + +class SIM(SIM_ENV): + """ + A simulation environment interface for robot navigation using IRSim. + + This class wraps around the IRSim environment and provides methods for stepping, + resetting, and interacting with a mobile robot, including reward computation. + + Attributes: + env (object): The simulation environment instance from IRSim. + robot_goal (np.ndarray): The goal position of the robot. + """ + + def __init__(self, world_file="robot_world.yaml", disable_plotting=False): + """ + Initialize the simulation environment. + + Args: + world_file (str): Path to the world configuration YAML file. + disable_plotting (bool): If True, disables rendering and plotting. + """ + display = False if disable_plotting else True + self.env = irsim.make( + world_file, disable_all_plot=disable_plotting, display=display + ) + robot_info = self.env.get_robot_info(0) + self.robot_goal = robot_info.goal + + def step(self, lin_velocity=0.0, ang_velocity=0.1): + """ + Perform one step in the simulation using the given control commands. + + Args: + lin_velocity (float): Linear velocity to apply to the robot. + ang_velocity (float): Angular velocity to apply to the robot. + + Returns: + (tuple): Contains the latest LIDAR scan, distance to goal, cosine and sine of angle to goal, + collision flag, goal reached flag, applied action, and computed reward. 
+ """ + self.env.step(action_id=0, action=np.array([[lin_velocity], [ang_velocity]])) + self.env.render() + + scan = self.env.get_lidar_scan() + latest_scan = scan["ranges"] + + robot_state = self.env.get_robot_state() + goal_vector = [ + self.robot_goal[0].item() - robot_state[0].item(), + self.robot_goal[1].item() - robot_state[1].item(), + ] + distance = np.linalg.norm(goal_vector) + goal = self.env.robot.arrive + pose_vector = [np.cos(robot_state[2]).item(), np.sin(robot_state[2]).item()] + cos, sin = self.cossin(pose_vector, goal_vector) + collision = self.env.robot.collision + action = [lin_velocity, ang_velocity] + reward = self.get_reward(goal, collision, action, latest_scan) + + return latest_scan, distance, cos, sin, collision, goal, action, reward + + def reset( + self, + robot_state=None, + robot_goal=None, + random_obstacles=True, + random_obstacle_ids=None, + ): + """ + Reset the simulation environment, optionally setting robot and obstacle states. + + Args: + robot_state (list or None): Initial state of the robot as a list of [x, y, theta, speed]. + robot_goal (list or None): Goal state for the robot. + random_obstacles (bool): Whether to randomly reposition obstacles. + random_obstacle_ids (list or None): Specific obstacle IDs to randomize. + + Returns: + (tuple): Initial observation after reset, including LIDAR scan, distance, cos/sin, + and reward-related flags and values. + """ + if robot_state is None: + robot_state = [[random.uniform(1, 9)], [random.uniform(1, 9)], [0]] + + self.env.robot.set_state( + state=np.array(robot_state), + init=True, + ) + + if random_obstacles: + if random_obstacle_ids is None: + random_obstacle_ids = [i + 1 for i in range(7)] + self.env.random_obstacle_position( + range_low=[0, 0, -3.14], + range_high=[10, 10, 3.14], + ids=random_obstacle_ids, + non_overlapping=True, + ) + + if robot_goal is None: + self.env.robot.set_random_goal( + obstacle_list=self.env.obstacle_list, + init=True, + range_limits=[[1, 1, -3.141592653589793], [9, 9, 3.141592653589793]], + ) + else: + self.env.robot.set_goal(np.array(robot_goal), init=True) + self.env.reset() + self.robot_goal = self.env.robot.goal + + action = [0.0, 0.0] + latest_scan, distance, cos, sin, _, _, action, reward = self.step( + lin_velocity=action[0], ang_velocity=action[1] + ) + return latest_scan, distance, cos, sin, False, False, action, reward + + @staticmethod + def get_reward(goal, collision, action, laser_scan): + """ + Calculate the reward for the current step. + + Args: + goal (bool): Whether the goal has been reached. + collision (bool): Whether a collision occurred. + action (list): The action taken [linear velocity, angular velocity]. + laser_scan (list): The LIDAR scan readings. + + Returns: + (float): Computed reward for the current state. 
+ """ + if goal: + return 100.0 + elif collision: + return -100.0 + else: + r3 = lambda x: 1.35 - x if x < 1.35 else 0.0 + return action[0] - abs(action[1]) / 2 - r3(min(laser_scan)) / 2 diff --git a/robot_nav/SIM_ENV/sim_env.py b/robot_nav/SIM_ENV/sim_env.py new file mode 100644 index 0000000..4cd6f77 --- /dev/null +++ b/robot_nav/SIM_ENV/sim_env.py @@ -0,0 +1,35 @@ +from abc import ABC, abstractmethod +import numpy as np + + +class SIM_ENV(ABC): + @abstractmethod + def step(self): + raise NotImplementedError("step method must be implemented by subclass.") + + @abstractmethod + def reset(self): + raise NotImplementedError("reset method must be implemented by subclass.") + + @staticmethod + def cossin(vec1, vec2): + """ + Compute the cosine and sine of the angle between two 2D vectors. + + Args: + vec1 (list): First 2D vector. + vec2 (list): Second 2D vector. + + Returns: + (tuple): (cosine, sine) of the angle between the vectors. + """ + vec1 = vec1 / np.linalg.norm(vec1) + vec2 = vec2 / np.linalg.norm(vec2) + cos = np.dot(vec1, vec2) + sin = vec1[0] * vec2[1] - vec1[1] * vec2[0] + return cos, sin + + @staticmethod + @abstractmethod + def get_reward(): + raise NotImplementedError("get_reward method must be implemented by subclass.") diff --git a/robot_nav/models/CNNTD3/CNNTD3.py b/robot_nav/models/CNNTD3/CNNTD3.py index 31b6946..0be20da 100644 --- a/robot_nav/models/CNNTD3/CNNTD3.py +++ b/robot_nav/models/CNNTD3/CNNTD3.py @@ -10,262 +10,6 @@ from robot_nav.utils import get_max_bound -class Attention(nn.Module): - def __init__(self, embedding_dim): - super(Attention, self).__init__() - self.embedding_dim = embedding_dim - - # CNN for laser scan - self.cnn1 = nn.Conv1d(1, 4, kernel_size=8, stride=4) - self.cnn2 = nn.Conv1d(4, 8, kernel_size=8, stride=4) - self.cnn3 = nn.Conv1d(8, 4, kernel_size=4, stride=2) - - # Embedding for goal and action - self.goal_embed = nn.Linear(3, 10) - self.action_embed = nn.Linear(2, 10) - - # Output of CNN + goal + action embeddings - self.layer_1 = nn.Linear(24, 48) - - # Hard attention MLP with distance - self.hard_mlp = nn.Sequential( - nn.Linear(embedding_dim * 2 + 1, embedding_dim), - nn.ReLU(), - nn.Linear(embedding_dim, embedding_dim) - ) - self.hard_encoding = nn.Linear(embedding_dim, 2) - - # Soft attention projections - self.q = nn.Linear(embedding_dim, embedding_dim, bias=False) - self.k = nn.Linear(embedding_dim, embedding_dim, bias=False) - self.v = nn.Linear(embedding_dim, embedding_dim) - - # Soft attention score network (with distance) - self.attn_score_layer = nn.Sequential( - nn.Linear(embedding_dim + 1, embedding_dim), - nn.ReLU(), - nn.Linear(embedding_dim, 1) - ) - - # Decoder - self.decode_1 = nn.Linear(embedding_dim * 2, 400) - nn.init.kaiming_uniform_(self.decode_1.weight, nonlinearity="leaky_relu") - self.decode_2 = nn.Linear(400, 300) - nn.init.kaiming_uniform_(self.decode_2.weight, nonlinearity="leaky_relu") - self.decode_3 = nn.Linear(300, 2) - self.tanh = nn.Tanh() - - def encode_agent_features(self, laser, goal, act): - laser = laser.unsqueeze(1) - l = F.leaky_relu(self.cnn1(laser)) - l = F.leaky_relu(self.cnn2(l)) - l = F.leaky_relu(self.cnn3(l)) - l = l.flatten(start_dim=1) - - g = F.leaky_relu(self.goal_embed(goal)) - a = F.leaky_relu(self.action_embed(act)) - - features = torch.cat((l, g, a), dim=-1) - return F.leaky_relu(self.layer_1(features)) - - def forward(self, embedding): - if embedding.dim() == 2: - embedding = embedding.unsqueeze(0) - batch_size, n_agents, _ = embedding.shape - - laser = embedding[:, :, 
:-7].reshape(batch_size * n_agents, -1) - goal = embedding[:, :, -7:-4].reshape(batch_size * n_agents, -1) - act = embedding[:, :, -4:-2].reshape(batch_size * n_agents, -1) - position = embedding[:, :, -2:].reshape(batch_size, n_agents, 2) - - agent_embed = self.encode_agent_features(laser, goal, act) - agent_embed = agent_embed.view(batch_size, n_agents, self.embedding_dim) - - # Hard attention - h_i = agent_embed.unsqueeze(2) # (B, N, 1, D) - h_j = agent_embed.unsqueeze(1) # (B, 1, N, D) - pos_i = position.unsqueeze(2) # (B, N, 1, 2) - pos_j = position.unsqueeze(1) # (B, 1, N, 2) - - pairwise_dist = torch.norm(pos_i - pos_j, dim=-1, keepdim=True) # (B, N, N, 1) - - mask = ~torch.eye(n_agents, dtype=torch.bool, device=embedding.device) - - h_i = h_i.expand(-1, -1, n_agents, -1) - h_j = h_j.expand(-1, n_agents, -1, -1) - d_ij = pairwise_dist # already correct shape - - hard_pairs = torch.cat([h_i, h_j], dim=-1) # (B, N, N, 2D) - hard_pairs = hard_pairs[:, mask].reshape(batch_size * n_agents, n_agents - 1, self.embedding_dim * 2) - d_ij = d_ij[:, mask].reshape(batch_size * n_agents, n_agents - 1, 1) - - hard_input = torch.cat([hard_pairs, d_ij], dim=-1) # (B*N, N-1, 2D+1) - h_hard = self.hard_mlp(hard_input) - hard_logits = self.hard_encoding(h_hard) - hard_weights = F.gumbel_softmax(hard_logits, tau=0.01, dim=-1)[..., 1].unsqueeze(2) - hard_weights = hard_weights.view(batch_size, n_agents, n_agents - 1) - - # Soft attention - q = self.q(agent_embed) # (B, N, D) - k = self.k(agent_embed) - v = F.relu(self.v(agent_embed)) - - attention_outputs = [] - for i in range(n_agents): - q_i = q[:, i].unsqueeze(1) # (B, 1, D) - k_j = torch.cat([k[:, :i], k[:, i+1:]], dim=1) # (B, N-1, D) - v_j = torch.cat([v[:, :i], v[:, i+1:]], dim=1) - pos_i = position[:, i].unsqueeze(1) # (B, 1, 2) - pos_j = torch.cat([position[:, :i], position[:, i+1:]], dim=1) # (B, N-1, 2) - dist_ij = torch.norm(pos_i - pos_j, dim=-1, keepdim=True) # (B, N-1, 1) - - q_i_ext = q_i.expand(-1, k_j.shape[1], -1) # (B, N-1, D) - qk_comb = q_i_ext * k_j # (B, N-1, D) - qk_with_dist = torch.cat([qk_comb, dist_ij], dim=-1) # (B, N-1, D+1) - - scores = self.attn_score_layer(qk_with_dist).transpose(1, 2) # (B, 1, N-1) - soft_weights = F.softmax(scores, dim=-1) # (B, 1, N-1) - - h_weights = hard_weights[:, i].unsqueeze(1) # (B, 1, N-1) - v_j = v_j.unsqueeze(1) # (B, 1, N-1, D) - combined_weights = soft_weights * h_weights # (B, 1, N-1) - attn_output = (v_j * combined_weights.unsqueeze(-1)).sum(dim=2) # (B, 1, D) - attention_outputs.append(attn_output.squeeze(1)) # (B, D) - - attn_stack = torch.stack(attention_outputs, dim=1).reshape(-1, self.embedding_dim) - self_embed = agent_embed.reshape(-1, self.embedding_dim) - concat_embed = torch.cat([self_embed, attn_stack], dim=-1) - - # Decode - x = F.leaky_relu(self.decode_1(concat_embed)) - x = F.leaky_relu(self.decode_2(x)) - action = self.tanh(self.decode_3(x)) - return action, hard_weights - - -class HardAttention(nn.Module): - def __init__(self, embedding_dim): - super(HardAttention, self).__init__() - self.embedding_dim = embedding_dim - # self.hard_bi_GRU = nn.GRU(args.rnn_hidden_dim * 2, args.rnn_hidden_dim, bidirectional=True) - self.cnn1 = nn.Conv1d(1, 4, kernel_size=8, stride=4) - self.cnn2 = nn.Conv1d(4, 8, kernel_size=8, stride=4) - self.cnn3 = nn.Conv1d(8, 4, kernel_size=4, stride=2) - - self.goal_embed = nn.Linear(3, 10) - self.action_embed = nn.Linear(2, 10) - - self.layer_1 = nn.Linear(24, 48) - - self.hard_encoding = nn.Linear(embedding_dim, 2) - - self.hard_mlp = 
nn.Sequential( - nn.Linear(embedding_dim * 2, embedding_dim), - nn.ReLU(), - nn.Linear(embedding_dim, embedding_dim) - ) - - # Soft - self.q = nn.Linear(embedding_dim, embedding_dim, bias=False) - self.k = nn.Linear(embedding_dim, embedding_dim, bias=False) - self.v = nn.Linear(embedding_dim, embedding_dim) - - self.decode_1 = nn.Linear(96, 400) - torch.nn.init.kaiming_uniform_(self.decode_1.weight, nonlinearity="leaky_relu") - self.decode_2 = nn.Linear(400, 300) - torch.nn.init.kaiming_uniform_(self.decode_2.weight, nonlinearity="leaky_relu") - self.decode_3 = nn.Linear(300, 2) - self.tanh = nn.Tanh() - - def forward(self, embedding): - - - if len(embedding.shape) == 2: - embedding = embedding.unsqueeze(0) - batch_size, n_agents, _ = embedding.shape - - laser = embedding[:,:, :-5].reshape(batch_size * n_agents, -1) - goal = embedding[:,:, -5:-2].reshape(batch_size * n_agents, -1) - act = embedding[:,:, -2:].reshape(batch_size * n_agents, -1) - laser = laser.unsqueeze(1) - - l = F.leaky_relu(self.cnn1(laser)) - l = F.leaky_relu(self.cnn2(l)) - l = F.leaky_relu(self.cnn3(l)) - l = l.flatten(start_dim=1) - g = F.leaky_relu(self.goal_embed(goal)) - a = F.leaky_relu(self.action_embed(act)) - s = torch.concat((l, g, a), dim=-1) - embedding = F.leaky_relu(self.layer_1(s)) - - - - embedding = embedding.view(batch_size, n_agents, self.embedding_dim) - input_hard = [] - for i in range(n_agents): - h_i = embedding[:, i] - h_hard_i = [] - for j in range(n_agents): - if j != i: - h_hard_i.append(torch.cat([h_i, embedding[:, j]], dim=-1)) - h_hard_i = torch.stack(h_hard_i, dim=0) - input_hard.append(h_hard_i) - - input_hard = torch.stack(input_hard, dim=-2) - input_hard = input_hard.permute(2, 0, 1, 3) # (batch_size, n_agents, n_agents - 1, emb_dim * 2) - input_hard = input_hard.reshape(batch_size * n_agents, n_agents - 1, self.embedding_dim * 2) - - # h_hard = torch.zeros((2 * 1, size, self.embedding_dim)) - # h_hard = h_hard.cuda() - input_hard_reshaped = input_hard.permute(1, 0, 2) # (batch_size * n_agents, n_agents - 1, rnn_hidden_dim * 2) - h_hard = self.hard_mlp(input_hard_reshaped) # Apply MLP over each pair - h_hard = h_hard.reshape(-1, - self.embedding_dim) # (batch_size * n_agents * (n_agents - 1), rnn_hidden_dim * 2) - - hard_weights = self.hard_encoding(h_hard) - hard_weights = F.gumbel_softmax(hard_weights, tau=0.01) - # print(hard_weights) - hard_weights = hard_weights[:, 1].view(-1, n_agents, 1, n_agents - 1) - hard_weights = hard_weights.permute(1, 0, 2, 3) - - embed = embedding.reshape(batch_size * n_agents, self.embedding_dim) - q = self.q(embed).reshape(batch_size, n_agents, self.embedding_dim) # (batch_size, n_agents, args.attention_dim) - k = self.k(embed).reshape(batch_size, n_agents, self.embedding_dim) # (batch_size, n_agents, args.attention_dim) - v = F.relu(self.v(embed)).reshape(batch_size, n_agents, self.embedding_dim) # (batch_size, n_agents, args.attention_dim) - x = [] - - for i in range(n_agents): - q_i = q[:, i].view(-1, 1, self.embedding_dim) # agent i的q,(batch_size, 1, args.attention_dim) - k_i = [k[:, j] for j in range(n_agents) if j != i] # 对于agent i来说,其他agent的k - v_i = [v[:, j] for j in range(n_agents) if j != i] # 对于agent i来说,其他agent的v - - k_i = torch.stack(k_i, dim=0) # (n_agents - 1, batch_size, args.attention_dim) - k_i = k_i.permute(1, 2, 0) # 交换三个维度,变成(batch_size, args.attention_dim, n_agents - 1) - v_i = torch.stack(v_i, dim=0) - v_i = v_i.permute(1, 2, 0) - - score = torch.matmul(q_i, k_i) - - scaled_score = score / np.sqrt(self.embedding_dim) - - 
soft_weight = F.softmax(scaled_score, dim=-1) # (batch_size,1, n_agents - 1) - - x_i = (v_i * soft_weight * hard_weights[i]).sum(dim=-1) - - x.append(x_i) - - x = torch.stack(x, dim=1).reshape(-1, self.embedding_dim) - final_input = torch.cat([embed, x], dim=-1) - - s = F.leaky_relu(self.decode_1(final_input)) - s = F.leaky_relu(self.decode_2(s)) - a = self.tanh(self.decode_3(s)) - - return a - - - - class Actor(nn.Module): """ Actor network for the CNNTD3 agent. @@ -295,7 +39,7 @@ def __init__(self, action_dim): self.goal_embed = nn.Linear(3, 10) self.action_embed = nn.Linear(2, 10) - self.layer_1 = nn.Linear(24, 400) + self.layer_1 = nn.Linear(36, 400) torch.nn.init.kaiming_uniform_(self.layer_1.weight, nonlinearity="leaky_relu") self.layer_2 = nn.Linear(400, 300) torch.nn.init.kaiming_uniform_(self.layer_2.weight, nonlinearity="leaky_relu") @@ -366,7 +110,7 @@ def __init__(self, action_dim): self.goal_embed = nn.Linear(3, 10) self.action_embed = nn.Linear(2, 10) - self.layer_1 = nn.Linear(24, 400) + self.layer_1 = nn.Linear(36, 400) torch.nn.init.kaiming_uniform_(self.layer_1.weight, nonlinearity="leaky_relu") self.layer_2_s = nn.Linear(400, 300) torch.nn.init.kaiming_uniform_(self.layer_2_s.weight, nonlinearity="leaky_relu") @@ -375,7 +119,7 @@ def __init__(self, action_dim): self.layer_3 = nn.Linear(300, 1) torch.nn.init.kaiming_uniform_(self.layer_3.weight, nonlinearity="leaky_relu") - self.layer_4 = nn.Linear(24, 400) + self.layer_4 = nn.Linear(36, 400) torch.nn.init.kaiming_uniform_(self.layer_1.weight, nonlinearity="leaky_relu") self.layer_5_s = nn.Linear(400, 300) torch.nn.init.kaiming_uniform_(self.layer_5_s.weight, nonlinearity="leaky_relu") @@ -398,9 +142,9 @@ def forward(self, s, action): - q1 (torch.Tensor): First Q-value estimate (batch_size, 1). - q2 (torch.Tensor): Second Q-value estimate (batch_size, 1). """ - laser = s[:, :-7] - goal = s[:, -7:-4] - act = s[:, -4:-2] + laser = s[:, :-5] + goal = s[:, -5:-2] + act = s[:, -2:] laser = laser.unsqueeze(1) l = F.leaky_relu(self.cnn1(laser)) @@ -464,7 +208,6 @@ def __init__( action_dim, max_action, device, - num_robots, lr=1e-4, save_every=0, load_model=False, @@ -475,12 +218,9 @@ def __init__( bound_weight=0.25, ): # Initialize the Actor network - self.num_robots = num_robots self.device = device - # self.actor = Actor(action_dim).to(self.device) - # self.actor_target = Actor(action_dim).to(self.device) - self.actor = Attention(48).to(self.device) - self.actor_target = Attention(48).to(self.device) + self.actor = Actor(action_dim).to(self.device) + self.actor_target = Actor(action_dim).to(self.device) self.actor_target.load_state_dict(self.actor.state_dict()) self.actor_optimizer = torch.optim.Adam(params=self.actor.parameters(), lr=lr) @@ -490,8 +230,6 @@ def __init__( self.critic_target.load_state_dict(self.critic.state_dict()) self.critic_optimizer = torch.optim.Adam(params=self.critic.parameters(), lr=lr) - # self.attention = HardAttention(48).to(self.device) - self.action_dim = action_dim self.max_action = max_action self.state_dim = state_dim @@ -516,13 +254,12 @@ def get_action(self, obs, add_noise): Returns: (np.ndarray): The selected action. 
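        Example: get_action(obs, add_noise=True) returns the actor output
        perturbed with N(0, 0.2) Gaussian noise and clipped to
        [-max_action, max_action], as implemented below.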
""" - action, connection = self.act(obs) if add_noise: return ( - action + np.random.normal(0, 0.2, size=self.action_dim) + self.act(obs) + np.random.normal(0, 0.2, size=self.action_dim) ).clip(-self.max_action, self.max_action) else: - return action.reshape(-1, 2), connection + return self.act(obs) def act(self, state): """ @@ -536,9 +273,7 @@ def act(self, state): """ # Function to get the action from the actor state = torch.Tensor(state).to(self.device) - # res = self.attention(state) - action, connection = self.actor(state) - return action.cpu().data.numpy().flatten(), connection + return self.actor(state).cpu().data.numpy().flatten() # training cycle def train( @@ -587,23 +322,21 @@ def train( batch_dones, batch_next_states, ) = replay_buffer.sample_batch(batch_size) - state = torch.Tensor(batch_states).to(self.device).view(-1, self.state_dim) - next_state = torch.Tensor(batch_next_states).to(self.device).view(-1, self.state_dim) - action = torch.Tensor(batch_actions).to(self.device).view(-1, 2) - reward = torch.Tensor(batch_rewards).to(self.device).view(-1, 1) - done = torch.Tensor(batch_dones).to(self.device).view(-1, 1) - + state = torch.Tensor(batch_states).to(self.device) + next_state = torch.Tensor(batch_next_states).to(self.device) + action = torch.Tensor(batch_actions).to(self.device) + reward = torch.Tensor(batch_rewards).to(self.device) + done = torch.Tensor(batch_dones).to(self.device) # Obtain the estimated action from the next state by using the actor-target - next_action, _ = self.actor_target(next_state) + next_action = self.actor_target(next_state) # Add noise to the action noise = ( torch.Tensor(batch_actions) .data.normal_(0, policy_noise) .to(self.device) - ).reshape(-1, 2) - + ) noise = noise.clamp(-noise_clip, noise_clip) next_action = (next_action + noise).clamp(-self.max_action, self.max_action) @@ -650,8 +383,7 @@ def train( if it % policy_freq == 0: # Maximize the actor output value by performing gradient descent on negative Q values # (essentially perform gradient ascent) - actn, _ = self.actor(state) - actor_grad, _ = self.critic(state, actn) + actor_grad, _ = self.critic(state, self.actor(state)) actor_grad = -actor_grad.mean() self.actor_optimizer.zero_grad() actor_grad.backward() @@ -725,7 +457,7 @@ def load(self, filename, directory): ) print(f"Loaded weights from: {directory}") - def prepare_state(self, latest_scan, distance, cos, sin, collision, goal, action, positions): + def prepare_state(self, latest_scan, distance, cos, sin, collision, goal, action): """ Prepares the environment's raw sensor data and navigation variables into a format suitable for learning. @@ -744,30 +476,19 @@ def prepare_state(self, latest_scan, distance, cos, sin, collision, goal, action - state (list): Normalized and concatenated state vector. - terminal (int): Terminal flag (1 if collision or goal, else 0). 
""" - states = [] - terminal = [] - for i in range(self.num_robots ): - scan = np.array(latest_scan[i]) - dist = distance[i] - cos_i = cos[i] - sin_i = sin[i] - act = action[i] - - inf_mask = np.isinf(scan) - scan[inf_mask] = 7.0 - scan /= 7 - - # Normalize to [0, 1] range - dist /= 10 - lin_vel = act[0] * 2 - ang_vel = (act[1] + 1) / 2 + latest_scan = np.array(latest_scan) - position = positions[i] - state = scan.tolist() + [dist, cos_i, sin_i] + [lin_vel, ang_vel] + position + inf_mask = np.isinf(latest_scan) + latest_scan[inf_mask] = 7.0 + latest_scan /= 7 - assert len(state) == self.state_dim - states.append(state) + # Normalize to [0, 1] range + distance /= 10 + lin_vel = action[0] * 2 + ang_vel = (action[1] + 1) / 2 + state = latest_scan.tolist() + [distance, cos, sin] + [lin_vel, ang_vel] - terminal.append(collision[i]) + assert len(state) == self.state_dim + terminal = 1 if collision or goal else 0 - return states, terminal + return state, terminal diff --git a/robot_nav/models/CNNTD3/att.py b/robot_nav/models/CNNTD3/att.py deleted file mode 100644 index 3080577..0000000 --- a/robot_nav/models/CNNTD3/att.py +++ /dev/null @@ -1,621 +0,0 @@ -from pathlib import Path - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from numpy import inf -from torch.utils.tensorboard import SummaryWriter - -from robot_nav.utils import get_max_bound - - -class Attention(nn.Module): - def __init__(self, embedding_dim): - super(Attention, self).__init__() - self.embedding_dim = embedding_dim - - # CNN for laser scan - self.embedding1= nn.Linear(5, 128) - nn.init.kaiming_uniform_(self.embedding1.weight, nonlinearity="leaky_relu") - self.embedding2 = nn.Linear(128, embedding_dim) - nn.init.kaiming_uniform_(self.embedding2.weight, nonlinearity="leaky_relu") - - - # Hard attention MLP with distance - self.hard_mlp = nn.Sequential( - nn.Linear(embedding_dim + 7, embedding_dim), - nn.ReLU(), - nn.Linear(embedding_dim, embedding_dim) - ) - self.hard_encoding = nn.Linear(embedding_dim, 2) - - # Soft attention projections - self.q = nn.Linear(embedding_dim, embedding_dim, bias=False) - self.k = nn.Linear(10, embedding_dim, bias=False) - self.v = nn.Linear(10, embedding_dim) - - # Soft attention score network (with distance) - self.attn_score_layer = nn.Sequential( - nn.Linear(embedding_dim *2, embedding_dim), - nn.ReLU(), - nn.Linear(embedding_dim, 1) - ) - - # Decoder - self.decode_1 = nn.Linear(embedding_dim * 2, embedding_dim * 2) - nn.init.kaiming_uniform_(self.decode_1.weight, nonlinearity="leaky_relu") - self.decode_2 = nn.Linear(embedding_dim * 2, embedding_dim * 2) - nn.init.kaiming_uniform_(self.decode_2.weight, nonlinearity="leaky_relu") - - def encode_agent_features(self, embed): - embed = F.leaky_relu(self.embedding1(embed)) - embed = F.leaky_relu(self.embedding2(embed)) - return embed - - def forward(self, embedding): - if embedding.dim() == 2: - embedding = embedding.unsqueeze(0) - batch_size, n_agents, _ = embedding.shape - - embed = embedding[:, :, 4:9].reshape(batch_size * n_agents, -1) - position = embedding[:, :, :2].reshape(batch_size, n_agents, 2) - heading = embedding[:, :, 2:4].reshape(batch_size, n_agents, 2) # assume (cos(θ), sin(θ)) - action = embedding[:, :, 7:9].reshape(batch_size, n_agents, 2) - goal = embedding[:, :, -2:].reshape(batch_size, n_agents, 2) - goal_j = goal.unsqueeze(1).expand(-1, n_agents, -1, -1) # (B, N, N, 2) - pos_i = position.unsqueeze(2) # (B, N, 1, 2) - goal_rel_vec = goal_j - pos_i - - agent_embed = 
self.encode_agent_features(embed) - agent_embed = agent_embed.view(batch_size, n_agents, self.embedding_dim) - - # For hard attention - h_i = agent_embed.unsqueeze(2) # (B, N, 1, D) - pos_i = position.unsqueeze(2) # (B, N, 1, 2) - pos_j = position.unsqueeze(1) # (B, 1, N, 2) - heading_i = heading.unsqueeze(2) # (B, N, 1, 2) - heading_j = heading.unsqueeze(1).expand(-1, n_agents, -1, -1) # (B, 1, N, 2) - - # Compute relative vectors and distance - rel_vec = pos_j - pos_i # (B, N, N, 2) - dx, dy = rel_vec[..., 0], rel_vec[..., 1] - rel_dist = torch.linalg.vector_norm(rel_vec, dim=-1, keepdim=True)/12 # (B, N, N, 1) - - # Relative angle in agent i's frame - angle = torch.atan2(dy, dx) - torch.atan2(heading_i[..., 1], heading_i[..., 0]) - angle = (angle + np.pi) % (2 * np.pi) - np.pi - rel_angle_sin = torch.sin(angle) - rel_angle_cos = torch.cos(angle) - - # Other agent's heading - heading_j_cos = heading_j[..., 0] # (B, 1, N) - heading_j_sin = heading_j[..., 1] # (B, 1, N) - - # Stack edge features - edge_features = torch.cat([ - rel_dist, # (B, N, N, 1) - rel_angle_cos.unsqueeze(-1), # (B, N, N, 1) - rel_angle_sin.unsqueeze(-1), # (B, N, N, 1) - heading_j_cos.unsqueeze(-1), # (B, N, N, 1) - heading_j_sin.unsqueeze(-1), # (B, N, N, 1) - action.unsqueeze(1).expand(-1, n_agents, -1, -1) # (B, N, N, 2) - ], dim=-1) # (B, N, N, 7) - - # Broadcast h_i along N (for each pair) - h_i_expanded = h_i.expand(-1, -1, n_agents, -1) # (B, N, N, D) - - # Remove self-pairs using mask - mask = ~torch.eye(n_agents, dtype=torch.bool, device=embedding.device) - h_i_flat = h_i_expanded[:, mask].reshape(batch_size * n_agents, n_agents - 1, self.embedding_dim) - edge_flat = edge_features[:, mask].reshape(batch_size * n_agents, n_agents - 1, -1) - - # Concatenate agent embedding and edge features - hard_input = torch.cat([h_i_flat, edge_flat], dim=-1) # (B*N, N-1, D+7) - - # Hard attention forward - h_hard = self.hard_mlp(hard_input) - hard_logits = self.hard_encoding(h_hard) - hard_weights = F.gumbel_softmax(hard_logits, hard=False, tau=0.5, dim=-1)[..., 1].unsqueeze(2) - hard_weights = hard_weights.view(batch_size, n_agents, n_agents - 1) - - unnorm_rel_vec = rel_vec - unnorm_rel_dist = torch.linalg.vector_norm(unnorm_rel_vec, dim=-1, keepdim=True) - unnorm_rel_dist = unnorm_rel_dist[:, mask].reshape(batch_size * n_agents, n_agents - 1, 1) - - # Soft attention - q = self.q(agent_embed) - - attention_outputs = [] - entropy_list = [] - combined_w = [] - - goal_rel_dist = torch.linalg.vector_norm(goal_rel_vec, dim=-1, keepdim=True) - goal_angle_global = torch.atan2(goal_rel_vec[..., 1], goal_rel_vec[..., 0]) - heading_angle = torch.atan2(heading_i[..., 1], heading_i[..., 0]) - goal_rel_angle = goal_angle_global - heading_angle - goal_rel_angle = (goal_rel_angle + np.pi) % (2 * np.pi) - np.pi - goal_rel_angle_cos = torch.cos(goal_rel_angle).unsqueeze(-1) - goal_rel_angle_sin = torch.sin(goal_rel_angle).unsqueeze(-1) - goal_polar = torch.cat([goal_rel_dist, goal_rel_angle_cos, goal_rel_angle_sin], dim=-1) - - - - soft_edge_features = torch.cat([edge_features, goal_polar], dim=-1) - for i in range(n_agents): - q_i = q[:, i:i + 1, :] # (B, 1, D) - mask = torch.ones(n_agents, dtype=torch.bool, device=edge_features.device) - mask[i] = False - edge_i_wo_self = soft_edge_features[:, i, mask, :] - edge_i_wo_self = edge_i_wo_self.squeeze(1) # (B, N-1, 7) - k = F.leaky_relu(self.k(edge_i_wo_self)) - - q_i_expanded = q_i.expand(-1, n_agents - 1, -1) # (B, N-1, D) - attention_input = torch.cat([q_i_expanded, k], dim=-1) # (B, 
N-1, D+7) - - # Score computation - scores = self.attn_score_layer(attention_input).transpose(1, 2) # (B, 1, N-1) - - # Mask using hard weights - h_weights = hard_weights[:, i].unsqueeze(1) # (B, 1, N-1) - mask = (h_weights > 0.5).float() - - # All-zero mask handling - epsilon = 1e-6 - mask_sum = mask.sum(dim=-1, keepdim=True) - all_zero_mask = (mask_sum < epsilon) - - # Apply mask to scores - masked_scores = scores.masked_fill(mask == 0, float('-inf')) - - # Compute softmax, safely handle all-zero cases - soft_weights = F.softmax(masked_scores, dim=-1) - soft_weights = torch.where(all_zero_mask, torch.zeros_like(soft_weights), soft_weights) - - combined_weights = soft_weights * mask # (B, 1, N-1) - combined_w.append(combined_weights) - - # Normalize combined_weights for entropy calculation - combined_weights_norm = combined_weights / (combined_weights.sum(dim=-1, keepdim=True) + epsilon) - - # Calculate entropy from combined_weights - entropy = -(combined_weights_norm * (combined_weights_norm + epsilon).log()).sum(dim=-1).mean() - entropy_list.append(entropy) - - # Project each other agent's features to embedding dim *before* the attention-weighted sum - v_j = F.leaky_relu(self.v(edge_i_wo_self)) - attn_output = torch.bmm(combined_weights, v_j).squeeze(1) # (B, embedding_dim) - attention_outputs.append(attn_output) - - comb_w = torch.stack(combined_w, dim=1).reshape(n_agents, -1) - attn_stack = torch.stack(attention_outputs, dim=1).reshape(-1, self.embedding_dim) - self_embed = agent_embed.reshape(-1, self.embedding_dim) - concat_embed = torch.cat([self_embed, attn_stack], dim=-1) - - x = F.leaky_relu(self.decode_1(concat_embed)) - att_embedding = F.leaky_relu(self.decode_2(x)) - - mean_entropy = torch.stack(entropy_list).mean() - - return att_embedding, hard_logits[..., 1], unnorm_rel_dist, mean_entropy, hard_weights, comb_w - - -class Actor(nn.Module): - def __init__(self, action_dim, embedding_dim): - super().__init__() - self.attention = Attention(embedding_dim) # ➊ edge classifier - - # ➋ policy head (everything _after_ attention) - self.policy_head = nn.Sequential( - nn.Linear(embedding_dim * 2, 400), - nn.LeakyReLU(), - nn.Linear(400, 300), - nn.LeakyReLU(), - nn.Linear(300, action_dim), - nn.Tanh(), - ) - - def forward(self, obs, detach_attn=False): - attn_out, hard_logits, pair_d, mean_entropy, hard_weights, combined_weights = self.attention(obs) - if detach_attn: # used in the policy phase - attn_out = attn_out.detach() - action = self.policy_head(attn_out) - return action, hard_logits, pair_d, mean_entropy, hard_weights, combined_weights - - -class Critic(nn.Module): - def __init__(self, action_dim, embedding_dim): - super(Critic, self).__init__() - self.embedding_dim = embedding_dim - self.attention = Attention(self.embedding_dim) - - self.layer_1 = nn.Linear(self.embedding_dim * 2, 400) - torch.nn.init.kaiming_uniform_(self.layer_1.weight, nonlinearity="leaky_relu") - - self.layer_2_s = nn.Linear(400, 300) - torch.nn.init.kaiming_uniform_(self.layer_2_s.weight, nonlinearity="leaky_relu") - - self.layer_2_a = nn.Linear(action_dim, 300) - torch.nn.init.kaiming_uniform_(self.layer_2_a.weight, nonlinearity="leaky_relu") - - self.layer_3 = nn.Linear(300, 1) - torch.nn.init.kaiming_uniform_(self.layer_3.weight, nonlinearity="leaky_relu") - - self.layer_4 = nn.Linear(self.embedding_dim * 2, 400) - torch.nn.init.kaiming_uniform_(self.layer_4.weight, nonlinearity="leaky_relu") # ✅ Fixed init bug - - self.layer_5_s = nn.Linear(400, 300) - 
torch.nn.init.kaiming_uniform_(self.layer_5_s.weight, nonlinearity="leaky_relu") - - self.layer_5_a = nn.Linear(action_dim, 300) - torch.nn.init.kaiming_uniform_(self.layer_5_a.weight, nonlinearity="leaky_relu") - - self.layer_6 = nn.Linear(300, 1) - torch.nn.init.kaiming_uniform_(self.layer_6.weight, nonlinearity="leaky_relu") - - def forward(self, embedding, action): - - embedding_with_attention, hard_logits, unnorm_rel_dist, mean_entropy, hard_weights, _ = self.attention(embedding) - - # Q1 - s1 = F.leaky_relu(self.layer_1(embedding_with_attention)) - s1 = F.leaky_relu(self.layer_2_s(s1) + self.layer_2_a(action)) # ✅ No .data - q1 = self.layer_3(s1) - - # Q2 - s2 = F.leaky_relu(self.layer_4(embedding_with_attention)) - s2 = F.leaky_relu(self.layer_5_s(s2) + self.layer_5_a(action)) # ✅ No .data - q2 = self.layer_6(s2) - - return q1, q2, mean_entropy, hard_logits, unnorm_rel_dist, hard_weights - - - -# CNNTD3 network -class CNNTD3(object): - """ - CNNTD3 (Twin Delayed Deep Deterministic Policy Gradient with CNN-based inputs) agent for - continuous control tasks. - - This class encapsulates the full implementation of the TD3 algorithm using neural network - architectures for the actor and critic, with optional bounding for critic outputs to - regularize learning. The agent is designed to train in environments where sensor - observations (e.g., LiDAR) are used for navigation tasks. - - Args: - state_dim (int): Dimension of the input state. - action_dim (int): Dimension of the output action. - max_action (float): Maximum magnitude of the action. - device (torch.device): Torch device to use (CPU or GPU). - lr (float): Learning rate for both actor and critic optimizers. - save_every (int): Save model every N training iterations (0 to disable). - load_model (bool): Whether to load a pre-trained model at initialization. - save_directory (Path): Path to the directory for saving model checkpoints. - model_name (str): Base name for the saved model files. - load_directory (Path): Path to load model checkpoints from (if `load_model=True`). - use_max_bound (bool): Whether to apply maximum Q-value bounding during training. - bound_weight (float): Weight for the bounding loss term in total loss. 
- """ - - def __init__( - self, - state_dim, - action_dim, - max_action, - device, - num_robots, - lr_actor=1e-4, - lr_critic=3e-4, - save_every=0, - load_model=False, - save_directory=Path("robot_nav/models/CNNTD3/checkpoint"), - model_name="CNNTD3", - load_model_name = None, - load_directory=Path("robot_nav/models/CNNTD3/checkpoint"), - use_max_bound=False, - bound_weight=0.25, - ): - # Initialize the Actor network - self.num_robots = num_robots - self.device = device - self.actor = Actor(action_dim, embedding_dim=256).to(self.device) # Using the updated Actor - self.actor_target = Actor(action_dim, embedding_dim=256).to(self.device) - self.actor_target.load_state_dict(self.actor.state_dict()) - # self.actor_optimizer = torch.optim.Adam(params=self.actor.parameters(), lr=lr_actor) - - self.attn_params = list(self.actor.attention.parameters()) - self.policy_params = list(self.actor.policy_head.parameters()) - - # self.attn_opt = torch.optim.Adam(self.attn_params, lr=1e-3) # edge classifier - self.actor_optimizer = torch.optim.Adam(self.policy_params + self.attn_params, lr=lr_actor) # TD3 policy - - self.critic = Critic(action_dim, embedding_dim=256).to(self.device) # Using the updated Critic - self.critic_target = Critic(action_dim, embedding_dim=256).to(self.device) - self.critic_target.load_state_dict(self.critic.state_dict()) - self.critic_optimizer = torch.optim.Adam(params=self.critic.parameters(), lr=lr_critic) - self.action_dim = action_dim - self.max_action = max_action - self.state_dim = state_dim - self.writer = SummaryWriter(comment=model_name) - self.iter_count = 0 - if load_model_name is None: - load_model_name = model_name - if load_model: - self.load(filename=load_model_name, directory=load_directory) - self.save_every = save_every - self.model_name = model_name - self.save_directory = save_directory - self.use_max_bound = use_max_bound - self.bound_weight = bound_weight - - def get_action(self, obs, add_noise): - """ - Selects an action for a given observation. - - Args: - obs (np.ndarray): The current observation/state. - add_noise (bool): Whether to add exploration noise to the action. - - Returns: - (np.ndarray): The selected action. - """ - action, connection, combined_weights = self.act(obs) - if add_noise: - noise = np.random.normal(0, 0.5, size=action.shape) - noise = [n/4 if i%2 else n for i, n in enumerate(noise)] - action = (action + noise - ).clip(-self.max_action, self.max_action) - - return action.reshape(-1, 2), connection, combined_weights - - def act(self, state): - """ - Computes the deterministic action from the actor network for a given state. - - Args: - state (np.ndarray): Input state. - - Returns: - (np.ndarray): Action predicted by the actor network. 
- """ - # Function to get the action from the actor - state = torch.Tensor(state).to(self.device) - # res = self.attention(state) - action, connection, _, _, _, combined_weights = self.actor(state) - return action.cpu().data.numpy().flatten(), connection, combined_weights - - # training cycle - def train( - self, - replay_buffer, - iterations, - batch_size, - discount=0.99, - tau=0.005, - policy_noise=0.2, - noise_clip=0.5, - policy_freq=2, - max_lin_vel=0.5, - max_ang_vel=1, - goal_reward=100, - distance_norm=10, - time_step=0.3, - ): - av_Q = 0 - max_Q = -inf - av_loss = 0 - av_critic_loss = 0 - av_critic_entropy = [] - av_actor_entropy = [] - av_actor_loss = 0 - av_critic_bce_loss = [] - av_actor_bce_loss = [] - - for it in range(iterations): - # sample a batch - ( - batch_states, - batch_actions, - batch_rewards, - batch_dones, - batch_next_states, - ) = replay_buffer.sample_batch(batch_size) - - state = torch.Tensor(batch_states).to(self.device).view(batch_size, self.num_robots, self.state_dim) - next_state = torch.Tensor(batch_next_states).to(self.device).view(batch_size, self.num_robots, self.state_dim) - action = torch.Tensor(batch_actions).to(self.device).view(batch_size * self.num_robots, self.action_dim) - reward = torch.Tensor(batch_rewards).to(self.device).view(batch_size * self.num_robots, 1) - done = torch.Tensor(batch_dones).to(self.device).view(batch_size * self.num_robots, 1) - - with torch.no_grad(): - next_action, _, _, _, _, _ = self.actor_target(next_state, detach_attn=True) - - # --- Target smoothing --- - noise = ( - torch.Tensor(batch_actions) - .data.normal_(0, policy_noise) - .to(self.device) - ).reshape(-1, 2) - noise = noise.clamp(-noise_clip, noise_clip) - next_action = (next_action + noise).clamp(-self.max_action, self.max_action) - - # --- Target Q values --- - target_Q1, target_Q2, _, _, _, _ = self.critic_target(next_state, next_action) - target_Q = torch.min(target_Q1, target_Q2) - av_Q += target_Q.mean() - max_Q = max(max_Q, target_Q.max().item()) - target_Q = reward + ((1 - done) * discount * target_Q).detach() - - # --- Critic update --- - current_Q1, current_Q2, mean_entropy, hard_logits, unnorm_rel_dist, hard_weights = self.critic(state, action) - critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q) - - proximity_threshold = 4 # You may need to adjust this - targets = (unnorm_rel_dist.flatten() < proximity_threshold).float() - flat_logits = hard_logits.flatten() - bce_loss = F.binary_cross_entropy_with_logits(flat_logits, targets) - # masked_weights = hard_weights.flatten()[mask] - # target = torch.ones_like(masked_weights) - # num_pos = masked_weights.numel() - # if num_pos > 0: - # bce_loss = F.binary_cross_entropy(masked_weights, target, reduction='sum') / num_pos - # else: - # bce_loss = torch.tensor(0.0, device=masked_weights.device) - - bce_weight = 0.1 - av_critic_bce_loss.append(bce_loss) - - critic_entropy_weight = 1 # or tuneable - total_loss = critic_loss - critic_entropy_weight * mean_entropy + bce_weight * bce_loss - av_critic_entropy.append(mean_entropy) - - self.critic_optimizer.zero_grad() - total_loss.backward() - torch.nn.utils.clip_grad_norm_(self.critic.parameters(), 10.0) - self.critic_optimizer.step() - - av_loss += total_loss.item() - av_critic_loss += critic_loss.item() - - # --- Actor update --- - if it % policy_freq == 0: - - action, hard_logits, unnorm_rel_dist, mean_entropy, hard_weights, _ = self.actor(state, detach_attn=False) - targets = (unnorm_rel_dist.flatten() < 
proximity_threshold).float() - flat_logits = hard_logits.flatten() - bce_loss = F.binary_cross_entropy_with_logits(flat_logits, targets) - # masked_weights = hard_weights.flatten()[mask] - # target = torch.ones_like(masked_weights) - # num_pos = masked_weights.numel() - # if num_pos > 0: - # bce_loss = F.binary_cross_entropy(masked_weights, target, reduction='sum') / num_pos - # else: - # bce_loss = torch.tensor(0.0, device=masked_weights.device) - - bce_weight = 0.1 - av_actor_bce_loss.append(bce_loss) - - actor_Q, _, _, _, _, _ = self.critic(state, action) - actor_loss = -actor_Q.mean() - actor_entropy_weight = 0.05 - total_loss = actor_loss - actor_entropy_weight * mean_entropy + bce_weight * bce_loss - av_actor_entropy.append(mean_entropy) - - self.actor_optimizer.zero_grad() - total_loss.backward() - torch.nn.utils.clip_grad_norm_(self.policy_params, 10.0) - self.actor_optimizer.step() - - av_actor_loss += total_loss.item() - - # Soft update target networks - for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()): - target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data) - - for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()): - target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data) - - self.iter_count += 1 - self.writer.add_scalar("train/loss_total", av_loss / iterations, self.iter_count) - self.writer.add_scalar("train/critic_loss", av_critic_loss / iterations, self.iter_count) - self.writer.add_scalar("train/av_critic_entropy", sum(av_critic_entropy) / len(av_critic_entropy), self.iter_count) - self.writer.add_scalar("train/av_actor_entropy", sum(av_actor_entropy) / len(av_actor_entropy), - self.iter_count) - self.writer.add_scalar("train/av_critic_bce_loss", sum(av_critic_bce_loss) / len(av_critic_bce_loss), - self.iter_count) - self.writer.add_scalar("train/av_actor_bce_loss", sum(av_actor_bce_loss) / len(av_actor_bce_loss), - self.iter_count) - self.writer.add_scalar("train/avg_Q", av_Q / iterations, self.iter_count) - self.writer.add_scalar("train/max_Q", max_Q, self.iter_count) - - self.writer.add_scalar("train/actor_loss", av_actor_loss / (iterations // policy_freq), self.iter_count) - - if self.save_every > 0 and self.iter_count % self.save_every == 0: - self.save(filename=self.model_name, directory=self.save_directory) - - def save(self, filename, directory): - """ - Saves the current model parameters to the specified directory. - - Args: - filename (str): Base filename for saved files. - directory (Path): Path to save the model files. - """ - Path(directory).mkdir(parents=True, exist_ok=True) - torch.save(self.actor.state_dict(), "%s/%s_actor.pth" % (directory, filename)) - torch.save( - self.actor_target.state_dict(), - "%s/%s_actor_target.pth" % (directory, filename), - ) - torch.save(self.critic.state_dict(), "%s/%s_critic.pth" % (directory, filename)) - torch.save( - self.critic_target.state_dict(), - "%s/%s_critic_target.pth" % (directory, filename), - ) - - def load(self, filename, directory): - """ - Loads model parameters from the specified directory. - - Args: - filename (str): Base filename for saved files. - directory (Path): Path to load the model files from. 
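# --- Aside: a minimal, self-contained sketch of the proximity-supervised gate
# --- used in the train() loops in this patch. Each ordered robot pair gets one
# --- hard-attention logit; pairs closer than a threshold (4 here, mirroring
# --- proximity_threshold above) are labelled 1, and BCE pulls the gate toward
# --- that prior. The tensor shapes below are illustrative assumptions.
import torch
import torch.nn.functional as F

def proximity_bce(hard_logits, pair_dist, threshold=4.0):
    """BCE between pairwise gate logits and distance-derived binary targets."""
    targets = (pair_dist.flatten() < threshold).float()
    return F.binary_cross_entropy_with_logits(hard_logits.flatten(), targets)

# Example: batch of 3, 2 robots, one logit and one distance per ordered pair.
logits = torch.randn(3, 2, 1)
dists = torch.rand(3, 2, 1) * 10      # unnormalized pairwise distances
loss = proximity_bce(logits, dists)   # scalar, added to the critic/actor losses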
- """ - self.actor.load_state_dict( - torch.load("%s/%s_actor.pth" % (directory, filename)) - ) - self.actor_target.load_state_dict( - torch.load("%s/%s_actor_target.pth" % (directory, filename)) - ) - self.critic.load_state_dict( - torch.load("%s/%s_critic.pth" % (directory, filename)) - ) - self.critic_target.load_state_dict( - torch.load("%s/%s_critic_target.pth" % (directory, filename)) - ) - print(f"Loaded weights from: {directory}") - - def prepare_state(self, poses, distance, cos, sin, collision, goal, action, positions, goal_positions): - """ - Prepares the environment's raw agent state for learning. - - Args: - poses (list): Each agent's global pose [x, y, theta]. - distance, cos, sin: Unused, can be removed or ignored. - collision (list): Collision flags per agent. - goal (list): Goal reached flags per agent. - action (list): Last action taken [lin_vel, ang_vel]. - positions (list): Extra features (e.g., neighbors). - goal_positions (list): Each agent's goal [x, y]. - - Returns: - states (list): List of processed state vectors. - terminal (list): Flags (1 if collision or goal reached, else 0). - """ - states = [] - terminal = [] - - for i in range(self.num_robots): - pose = poses[i] # [x, y, theta] - goal_pos = goal_positions[i] # [goal_x, goal_y] - act = action[i] # [lin_vel, ang_vel] - - px, py, theta = pose - gx, gy = goal_pos - - # Heading as cos/sin - heading_cos = np.cos(theta) - heading_sin = np.sin(theta) - - # Last velocity - lin_vel = act[0] * 2 # Assuming original range [-0.5, 0.5] - ang_vel = (act[1] + 1) / 2 # Assuming original range [-1, 1] - - # Final state vector - state = [px, py, heading_cos, heading_sin, distance[i]/17, cos[i], sin[i], lin_vel, ang_vel, gx, gy] - - assert len(state) == self.state_dim, f"State length mismatch: expected {self.state_dim}, got {len(state)}" - states.append(state) - terminal.append(collision[i]) - - return states, terminal diff --git a/robot_nav/models/MARL/hsAttention.py b/robot_nav/models/MARL/hsAttention.py new file mode 100644 index 0000000..927c1df --- /dev/null +++ b/robot_nav/models/MARL/hsAttention.py @@ -0,0 +1,229 @@ +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class Attention(nn.Module): + def __init__(self, embedding_dim): + super(Attention, self).__init__() + self.embedding_dim = embedding_dim + + # CNN for laser scan + self.embedding1 = nn.Linear(5, 128) + nn.init.kaiming_uniform_(self.embedding1.weight, nonlinearity="leaky_relu") + self.embedding2 = nn.Linear(128, embedding_dim) + nn.init.kaiming_uniform_(self.embedding2.weight, nonlinearity="leaky_relu") + + # Hard attention MLP with distance + self.hard_mlp = nn.Sequential( + nn.Linear(embedding_dim + 7, embedding_dim), + nn.ReLU(), + nn.Linear(embedding_dim, embedding_dim), + ) + self.hard_encoding = nn.Linear(embedding_dim, 2) + + # Soft attention projections + self.q = nn.Linear(embedding_dim, embedding_dim, bias=False) + self.k = nn.Linear(10, embedding_dim, bias=False) + self.v = nn.Linear(10, embedding_dim) + + # Soft attention score network (with distance) + self.attn_score_layer = nn.Sequential( + nn.Linear(embedding_dim * 2, embedding_dim), + nn.ReLU(), + nn.Linear(embedding_dim, 1), + ) + + # Decoder + self.decode_1 = nn.Linear(embedding_dim * 2, embedding_dim * 2) + nn.init.kaiming_uniform_(self.decode_1.weight, nonlinearity="leaky_relu") + self.decode_2 = nn.Linear(embedding_dim * 2, embedding_dim * 2) + nn.init.kaiming_uniform_(self.decode_2.weight, nonlinearity="leaky_relu") + + def 
encode_agent_features(self, embed): + embed = F.leaky_relu(self.embedding1(embed)) + embed = F.leaky_relu(self.embedding2(embed)) + return embed + + def forward(self, embedding): + if embedding.dim() == 2: + embedding = embedding.unsqueeze(0) + batch_size, n_agents, _ = embedding.shape + + embed = embedding[:, :, 4:9].reshape(batch_size * n_agents, -1) + position = embedding[:, :, :2].reshape(batch_size, n_agents, 2) + heading = embedding[:, :, 2:4].reshape( + batch_size, n_agents, 2 + ) # assume (cos(θ), sin(θ)) + action = embedding[:, :, 7:9].reshape(batch_size, n_agents, 2) + goal = embedding[:, :, -2:].reshape(batch_size, n_agents, 2) + goal_j = goal.unsqueeze(1).expand(-1, n_agents, -1, -1) # (B, N, N, 2) + pos_i = position.unsqueeze(2) # (B, N, 1, 2) + goal_rel_vec = goal_j - pos_i + + agent_embed = self.encode_agent_features(embed) + agent_embed = agent_embed.view(batch_size, n_agents, self.embedding_dim) + + # For hard attention + h_i = agent_embed.unsqueeze(2) # (B, N, 1, D) + pos_i = position.unsqueeze(2) # (B, N, 1, 2) + pos_j = position.unsqueeze(1) # (B, 1, N, 2) + heading_i = heading.unsqueeze(2) # (B, N, 1, 2) + heading_j = heading.unsqueeze(1).expand(-1, n_agents, -1, -1) # (B, 1, N, 2) + + # Compute relative vectors and distance + rel_vec = pos_j - pos_i # (B, N, N, 2) + dx, dy = rel_vec[..., 0], rel_vec[..., 1] + rel_dist = ( + torch.linalg.vector_norm(rel_vec, dim=-1, keepdim=True) / 12 + ) # (B, N, N, 1) + + # Relative angle in agent i's frame + angle = torch.atan2(dy, dx) - torch.atan2(heading_i[..., 1], heading_i[..., 0]) + angle = (angle + np.pi) % (2 * np.pi) - np.pi + rel_angle_sin = torch.sin(angle) + rel_angle_cos = torch.cos(angle) + + # Other agent's heading + heading_j_cos = heading_j[..., 0] # (B, 1, N) + heading_j_sin = heading_j[..., 1] # (B, 1, N) + + # Stack edge features + edge_features = torch.cat( + [ + rel_dist, # (B, N, N, 1) + rel_angle_cos.unsqueeze(-1), # (B, N, N, 1) + rel_angle_sin.unsqueeze(-1), # (B, N, N, 1) + heading_j_cos.unsqueeze(-1), # (B, N, N, 1) + heading_j_sin.unsqueeze(-1), # (B, N, N, 1) + action.unsqueeze(1).expand(-1, n_agents, -1, -1), # (B, N, N, 2) + ], + dim=-1, + ) # (B, N, N, 7) + + # Broadcast h_i along N (for each pair) + h_i_expanded = h_i.expand(-1, -1, n_agents, -1) # (B, N, N, D) + + # Remove self-pairs using mask + mask = ~torch.eye(n_agents, dtype=torch.bool, device=embedding.device) + h_i_flat = h_i_expanded[:, mask].reshape( + batch_size * n_agents, n_agents - 1, self.embedding_dim + ) + edge_flat = edge_features[:, mask].reshape( + batch_size * n_agents, n_agents - 1, -1 + ) + + # Concatenate agent embedding and edge features + hard_input = torch.cat([h_i_flat, edge_flat], dim=-1) # (B*N, N-1, D+7) + + # Hard attention forward + h_hard = self.hard_mlp(hard_input) + hard_logits = self.hard_encoding(h_hard) + hard_weights = F.gumbel_softmax(hard_logits, hard=False, tau=0.5, dim=-1)[ + ..., 1 + ].unsqueeze(2) + hard_weights = hard_weights.view(batch_size, n_agents, n_agents - 1) + + unnorm_rel_vec = rel_vec + unnorm_rel_dist = torch.linalg.vector_norm(unnorm_rel_vec, dim=-1, keepdim=True) + unnorm_rel_dist = unnorm_rel_dist[:, mask].reshape( + batch_size * n_agents, n_agents - 1, 1 + ) + + # Soft attention + q = self.q(agent_embed) + + attention_outputs = [] + entropy_list = [] + combined_w = [] + + goal_rel_dist = torch.linalg.vector_norm(goal_rel_vec, dim=-1, keepdim=True) + goal_angle_global = torch.atan2(goal_rel_vec[..., 1], goal_rel_vec[..., 0]) + heading_angle = torch.atan2(heading_i[..., 1], heading_i[..., 
0])
+        goal_rel_angle = goal_angle_global - heading_angle
+        goal_rel_angle = (goal_rel_angle + np.pi) % (2 * np.pi) - np.pi
+        goal_rel_angle_cos = torch.cos(goal_rel_angle).unsqueeze(-1)
+        goal_rel_angle_sin = torch.sin(goal_rel_angle).unsqueeze(-1)
+        goal_polar = torch.cat(
+            [goal_rel_dist, goal_rel_angle_cos, goal_rel_angle_sin], dim=-1
+        )
+
+        soft_edge_features = torch.cat([edge_features, goal_polar], dim=-1)
+        for i in range(n_agents):
+            q_i = q[:, i : i + 1, :]  # (B, 1, D)
+            mask = torch.ones(n_agents, dtype=torch.bool, device=edge_features.device)
+            mask[i] = False
+            edge_i_wo_self = soft_edge_features[:, i, mask, :]
+            edge_i_wo_self = edge_i_wo_self.squeeze(1)  # (B, N-1, 10)
+            k = F.leaky_relu(self.k(edge_i_wo_self))
+
+            q_i_expanded = q_i.expand(-1, n_agents - 1, -1)  # (B, N-1, D)
+            attention_input = torch.cat([q_i_expanded, k], dim=-1)  # (B, N-1, 2D)
+
+            # Score computation
+            scores = self.attn_score_layer(attention_input).transpose(
+                1, 2
+            )  # (B, 1, N-1)
+
+            # Mask using hard weights
+            h_weights = hard_weights[:, i].unsqueeze(1)  # (B, 1, N-1)
+            mask = (h_weights > 0.5).float()
+
+            # All-zero mask handling
+            epsilon = 1e-6
+            mask_sum = mask.sum(dim=-1, keepdim=True)
+            all_zero_mask = mask_sum < epsilon
+
+            # Apply mask to scores
+            masked_scores = scores.masked_fill(mask == 0, float("-inf"))
+
+            # Compute softmax, safely handle all-zero cases
+            soft_weights = F.softmax(masked_scores, dim=-1)
+            soft_weights = torch.where(
+                all_zero_mask, torch.zeros_like(soft_weights), soft_weights
+            )
+
+            combined_weights = soft_weights * mask  # (B, 1, N-1)
+            combined_w.append(combined_weights)
+
+            # Normalize combined_weights for entropy calculation
+            combined_weights_norm = combined_weights / (
+                combined_weights.sum(dim=-1, keepdim=True) + epsilon
+            )
+
+            # Calculate entropy from combined_weights
+            entropy = (
+                -(combined_weights_norm * (combined_weights_norm + epsilon).log())
+                .sum(dim=-1)
+                .mean()
+            )
+            entropy_list.append(entropy)
+
+            # Project each other agent's features to embedding dim *before* the attention-weighted sum
+            v_j = F.leaky_relu(self.v(edge_i_wo_self))
+            attn_output = torch.bmm(combined_weights, v_j).squeeze(
+                1
+            )  # (B, embedding_dim)
+            attention_outputs.append(attn_output)
+
+        comb_w = torch.stack(combined_w, dim=1).reshape(n_agents, -1)
+        attn_stack = torch.stack(attention_outputs, dim=1).reshape(
+            -1, self.embedding_dim
+        )
+        self_embed = agent_embed.reshape(-1, self.embedding_dim)
+        concat_embed = torch.cat([self_embed, attn_stack], dim=-1)
+
+        x = F.leaky_relu(self.decode_1(concat_embed))
+        att_embedding = F.leaky_relu(self.decode_2(x))
+
+        mean_entropy = torch.stack(entropy_list).mean()
+
+        return (
+            att_embedding,
+            hard_logits[..., 1],
+            unnorm_rel_dist,
+            mean_entropy,
+            hard_weights,
+            comb_w,
+        )
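# --- Aside: the all-zero-mask handling in the soft-attention loop above is the
# --- subtle part: once the hard gate switches every neighbour off, a softmax
# --- over all -inf scores would return NaNs. A sketch of the same guard in
# --- isolation; the (B, 1, N-1) shapes are assumptions matching the loop above.
import torch
import torch.nn.functional as F

def safe_masked_softmax(scores, mask):
    masked = scores.masked_fill(mask == 0, float("-inf"))
    weights = F.softmax(masked, dim=-1)
    all_off = mask.sum(dim=-1, keepdim=True) == 0  # rows with no active neighbour
    return torch.where(all_off, torch.zeros_like(weights), weights)

scores = torch.randn(4, 1, 3)
mask = torch.tensor([1.0, 0.0, 1.0]).expand(4, 1, 3)
print(safe_masked_softmax(scores, mask))  # active rows sum to 1, masked entries are 0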
diff --git a/robot_nav/models/MARL/marlTD3.py b/robot_nav/models/MARL/marlTD3.py
new file mode 100644
index 0000000..a67eb3d
--- /dev/null
+++ b/robot_nav/models/MARL/marlTD3.py
@@ -0,0 +1,510 @@
+from pathlib import Path
+
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from numpy import inf
+from torch.utils.tensorboard import SummaryWriter
+
+from robot_nav.models.MARL.hsAttention import Attention
+
+
+class Actor(nn.Module):
+    def __init__(self, action_dim, embedding_dim):
+        super().__init__()
+        self.attention = Attention(embedding_dim)  # edge classifier
+
+        # policy head (everything after attention)
+        self.policy_head = nn.Sequential(
+            nn.Linear(embedding_dim * 2, 400),
+            nn.LeakyReLU(),
+            nn.Linear(400, 300),
+            nn.LeakyReLU(),
+            nn.Linear(300, action_dim),
+            nn.Tanh(),
+        )
+
+    def forward(self, obs, detach_attn=False):
+        attn_out, hard_logits, pair_d, mean_entropy, hard_weights, combined_weights = (
+            self.attention(obs)
+        )
+        if detach_attn:  # used in the policy phase
+            attn_out = attn_out.detach()
+        action = self.policy_head(attn_out)
+        return action, hard_logits, pair_d, mean_entropy, hard_weights, combined_weights
+
+
+class Critic(nn.Module):
+    def __init__(self, action_dim, embedding_dim):
+        super(Critic, self).__init__()
+        self.embedding_dim = embedding_dim
+        self.attention = Attention(self.embedding_dim)
+
+        self.layer_1 = nn.Linear(self.embedding_dim * 2, 400)
+        torch.nn.init.kaiming_uniform_(self.layer_1.weight, nonlinearity="leaky_relu")
+
+        self.layer_2_s = nn.Linear(400, 300)
+        torch.nn.init.kaiming_uniform_(self.layer_2_s.weight, nonlinearity="leaky_relu")
+
+        self.layer_2_a = nn.Linear(action_dim, 300)
+        torch.nn.init.kaiming_uniform_(self.layer_2_a.weight, nonlinearity="leaky_relu")
+
+        self.layer_3 = nn.Linear(300, 1)
+        torch.nn.init.kaiming_uniform_(self.layer_3.weight, nonlinearity="leaky_relu")
+
+        self.layer_4 = nn.Linear(self.embedding_dim * 2, 400)
+        torch.nn.init.kaiming_uniform_(
+            self.layer_4.weight, nonlinearity="leaky_relu"
+        )
+
+        self.layer_5_s = nn.Linear(400, 300)
+        torch.nn.init.kaiming_uniform_(self.layer_5_s.weight, nonlinearity="leaky_relu")
+
+        self.layer_5_a = nn.Linear(action_dim, 300)
+        torch.nn.init.kaiming_uniform_(self.layer_5_a.weight, nonlinearity="leaky_relu")
+
+        self.layer_6 = nn.Linear(300, 1)
+        torch.nn.init.kaiming_uniform_(self.layer_6.weight, nonlinearity="leaky_relu")
+
+    def forward(self, embedding, action):
+        (
+            embedding_with_attention,
+            hard_logits,
+            unnorm_rel_dist,
+            mean_entropy,
+            hard_weights,
+            _,
+        ) = self.attention(embedding)
+
+        # Q1
+        s1 = F.leaky_relu(self.layer_1(embedding_with_attention))
+        s1 = F.leaky_relu(self.layer_2_s(s1) + self.layer_2_a(action))
+        q1 = self.layer_3(s1)
+
+        # Q2
+        s2 = F.leaky_relu(self.layer_4(embedding_with_attention))
+        s2 = F.leaky_relu(self.layer_5_s(s2) + self.layer_5_a(action))
+        q2 = self.layer_6(s2)
+
+        return q1, q2, mean_entropy, hard_logits, unnorm_rel_dist, hard_weights
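# --- Aside: the twin critic above feeds the standard TD3 target recipe used in
# --- train() below: Gaussian smoothing noise is added to the target action and
# --- the element-wise minimum of the two Q heads curbs overestimation. A
# --- network-free sketch with made-up numbers:
import torch

def td3_target(reward, done, q1, q2, discount=0.99):
    target_q = torch.min(q1, q2)  # pessimistic estimate across the twin heads
    return reward + (1.0 - done) * discount * target_q

q1 = torch.tensor([[1.2], [0.7]])
q2 = torch.tensor([[1.0], [0.9]])
reward = torch.tensor([[0.1], [-0.3]])
done = torch.tensor([[0.0], [1.0]])
print(td3_target(reward, done, q1, q2))  # terminal row collapses to its reward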
+
+
+class TD3(object):
+    """
+    Multi-agent TD3 (Twin Delayed Deep Deterministic Policy Gradient) agent for
+    continuous control tasks.
+
+    This class implements the TD3 algorithm with an attention-based actor and
+    critic, so that each robot's policy and value estimate can condition on the
+    states of the other robots. The agent is designed for multi-robot
+    navigation tasks.
+
+    Args:
+        state_dim (int): Dimension of the input state per robot.
+        action_dim (int): Dimension of the output action.
+        max_action (float): Maximum magnitude of the action.
+        device (torch.device): Torch device to use (CPU or GPU).
+        num_robots (int): Number of robots controlled jointly.
+        lr_actor (float): Learning rate for the actor optimizer.
+        lr_critic (float): Learning rate for the critic optimizer.
+        save_every (int): Save model every N training iterations (0 to disable).
+        load_model (bool): Whether to load a pre-trained model at initialization.
+        save_directory (Path): Path to the directory for saving model checkpoints.
+        model_name (str): Base name for the saved model files.
+        load_model_name (str or None): Checkpoint name to load; defaults to `model_name`.
+        load_directory (Path): Path to load model checkpoints from (if `load_model=True`).
+    """
+
+    def __init__(
+        self,
+        state_dim,
+        action_dim,
+        max_action,
+        device,
+        num_robots,
+        lr_actor=1e-4,
+        lr_critic=3e-4,
+        save_every=0,
+        load_model=False,
+        save_directory=Path("robot_nav/models/MARL/checkpoint"),
+        model_name="marlTD3",
+        load_model_name=None,
+        load_directory=Path("robot_nav/models/MARL/checkpoint"),
+    ):
+        # Initialize the actor network and its target copy
+        self.num_robots = num_robots
+        self.device = device
+        self.actor = Actor(action_dim, embedding_dim=256).to(self.device)
+        self.actor_target = Actor(action_dim, embedding_dim=256).to(self.device)
+        self.actor_target.load_state_dict(self.actor.state_dict())
+
+        self.attn_params = list(self.actor.attention.parameters())
+        self.policy_params = list(self.actor.policy_head.parameters())
+
+        self.actor_optimizer = torch.optim.Adam(
+            self.policy_params + self.attn_params, lr=lr_actor
+        )  # TD3 policy
+
+        self.critic = Critic(action_dim, embedding_dim=256).to(self.device)
+        self.critic_target = Critic(action_dim, embedding_dim=256).to(self.device)
+        self.critic_target.load_state_dict(self.critic.state_dict())
+        self.critic_optimizer = torch.optim.Adam(
+            params=self.critic.parameters(), lr=lr_critic
+        )
+        self.action_dim = action_dim
+        self.max_action = max_action
+        self.state_dim = state_dim
+        self.writer = SummaryWriter(comment=model_name)
+        self.iter_count = 0
+        if load_model_name is None:
+            load_model_name = model_name
+        if load_model:
+            self.load(filename=load_model_name, directory=load_directory)
+        self.save_every = save_every
+        self.model_name = model_name
+        self.save_directory = save_directory
+
+    def get_action(self, obs, add_noise):
+        """
+        Selects an action for a given observation.
+
+        Args:
+            obs (np.ndarray): The current observation/state.
+            add_noise (bool): Whether to add exploration noise to the action.
+
+        Returns:
+            (np.ndarray): The selected action.
+        """
+        action, connection, combined_weights = self.act(obs)
+        if add_noise:
+            noise = np.random.normal(0, 0.5, size=action.shape)
+            noise = [n / 4 if i % 2 else n for i, n in enumerate(noise)]
+            action = (action + noise).clip(-self.max_action, self.max_action)
+
+        return action.reshape(-1, 2), connection, combined_weights
+
+    def act(self, state):
+        """
+        Computes the deterministic action from the actor network for a given state.
+
+        Args:
+            state (np.ndarray): Input state.
+
+        Returns:
+            (np.ndarray): Action predicted by the actor network.
+ """ + # Function to get the action from the actor + state = torch.Tensor(state).to(self.device) + # res = self.attention(state) + action, connection, _, _, _, combined_weights = self.actor(state) + return action.cpu().data.numpy().flatten(), connection, combined_weights + + # training cycle + def train( + self, + replay_buffer, + iterations, + batch_size, + discount=0.99, + tau=0.005, + policy_noise=0.2, + noise_clip=0.5, + policy_freq=2, + ): + av_Q = 0 + max_Q = -inf + av_loss = 0 + av_critic_loss = 0 + av_critic_entropy = [] + av_actor_entropy = [] + av_actor_loss = 0 + av_critic_bce_loss = [] + av_actor_bce_loss = [] + + for it in range(iterations): + # sample a batch + ( + batch_states, + batch_actions, + batch_rewards, + batch_dones, + batch_next_states, + ) = replay_buffer.sample_batch(batch_size) + + state = ( + torch.Tensor(batch_states) + .to(self.device) + .view(batch_size, self.num_robots, self.state_dim) + ) + next_state = ( + torch.Tensor(batch_next_states) + .to(self.device) + .view(batch_size, self.num_robots, self.state_dim) + ) + action = ( + torch.Tensor(batch_actions) + .to(self.device) + .view(batch_size * self.num_robots, self.action_dim) + ) + reward = ( + torch.Tensor(batch_rewards) + .to(self.device) + .view(batch_size * self.num_robots, 1) + ) + done = ( + torch.Tensor(batch_dones) + .to(self.device) + .view(batch_size * self.num_robots, 1) + ) + + with torch.no_grad(): + next_action, _, _, _, _, _ = self.actor_target( + next_state, detach_attn=True + ) + + # --- Target smoothing --- + noise = ( + torch.Tensor(batch_actions) + .data.normal_(0, policy_noise) + .to(self.device) + ).reshape(-1, 2) + noise = noise.clamp(-noise_clip, noise_clip) + next_action = (next_action + noise).clamp(-self.max_action, self.max_action) + + # --- Target Q values --- + target_Q1, target_Q2, _, _, _, _ = self.critic_target( + next_state, next_action + ) + target_Q = torch.min(target_Q1, target_Q2) + av_Q += target_Q.mean() + max_Q = max(max_Q, target_Q.max().item()) + target_Q = reward + ((1 - done) * discount * target_Q).detach() + + # --- Critic update --- + ( + current_Q1, + current_Q2, + mean_entropy, + hard_logits, + unnorm_rel_dist, + hard_weights, + ) = self.critic(state, action) + critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss( + current_Q2, target_Q + ) + + proximity_threshold = 4 # You may need to adjust this + targets = (unnorm_rel_dist.flatten() < proximity_threshold).float() + flat_logits = hard_logits.flatten() + bce_loss = F.binary_cross_entropy_with_logits(flat_logits, targets) + + bce_weight = 0.1 + av_critic_bce_loss.append(bce_loss) + + critic_entropy_weight = 1 # or tuneable + total_loss = ( + critic_loss + - critic_entropy_weight * mean_entropy + + bce_weight * bce_loss + ) + av_critic_entropy.append(mean_entropy) + + self.critic_optimizer.zero_grad() + total_loss.backward() + torch.nn.utils.clip_grad_norm_(self.critic.parameters(), 10.0) + self.critic_optimizer.step() + + av_loss += total_loss.item() + av_critic_loss += critic_loss.item() + + # --- Actor update --- + if it % policy_freq == 0: + + action, hard_logits, unnorm_rel_dist, mean_entropy, hard_weights, _ = ( + self.actor(state, detach_attn=False) + ) + targets = (unnorm_rel_dist.flatten() < proximity_threshold).float() + flat_logits = hard_logits.flatten() + bce_loss = F.binary_cross_entropy_with_logits(flat_logits, targets) + + bce_weight = 0.1 + av_actor_bce_loss.append(bce_loss) + + actor_Q, _, _, _, _, _ = self.critic(state, action) + actor_loss = -actor_Q.mean() + actor_entropy_weight 
= 0.05
+                total_loss = (
+                    actor_loss
+                    - actor_entropy_weight * mean_entropy
+                    + bce_weight * bce_loss
+                )
+                av_actor_entropy.append(mean_entropy)
+
+                self.actor_optimizer.zero_grad()
+                total_loss.backward()
+                torch.nn.utils.clip_grad_norm_(self.policy_params, 10.0)
+                self.actor_optimizer.step()
+
+                av_actor_loss += total_loss.item()
+
+                # Soft update target networks
+                for param, target_param in zip(
+                    self.actor.parameters(), self.actor_target.parameters()
+                ):
+                    target_param.data.copy_(
+                        tau * param.data + (1 - tau) * target_param.data
+                    )
+
+                for param, target_param in zip(
+                    self.critic.parameters(), self.critic_target.parameters()
+                ):
+                    target_param.data.copy_(
+                        tau * param.data + (1 - tau) * target_param.data
+                    )
+
+        self.iter_count += 1
+        self.writer.add_scalar(
+            "train/loss_total", av_loss / iterations, self.iter_count
+        )
+        self.writer.add_scalar(
+            "train/critic_loss", av_critic_loss / iterations, self.iter_count
+        )
+        self.writer.add_scalar(
+            "train/av_critic_entropy",
+            sum(av_critic_entropy) / len(av_critic_entropy),
+            self.iter_count,
+        )
+        self.writer.add_scalar(
+            "train/av_actor_entropy",
+            sum(av_actor_entropy) / len(av_actor_entropy),
+            self.iter_count,
+        )
+        self.writer.add_scalar(
+            "train/av_critic_bce_loss",
+            sum(av_critic_bce_loss) / len(av_critic_bce_loss),
+            self.iter_count,
+        )
+        self.writer.add_scalar(
+            "train/av_actor_bce_loss",
+            sum(av_actor_bce_loss) / len(av_actor_bce_loss),
+            self.iter_count,
+        )
+        self.writer.add_scalar("train/avg_Q", av_Q / iterations, self.iter_count)
+        self.writer.add_scalar("train/max_Q", max_Q, self.iter_count)
+
+        self.writer.add_scalar(
+            "train/actor_loss",
+            av_actor_loss / (iterations // policy_freq),
+            self.iter_count,
+        )
+
+        if self.save_every > 0 and self.iter_count % self.save_every == 0:
+            self.save(filename=self.model_name, directory=self.save_directory)
+
+    def save(self, filename, directory):
+        """
+        Saves the current model parameters to the specified directory.
+
+        Args:
+            filename (str): Base filename for saved files.
+            directory (Path): Path to save the model files.
+        """
+        Path(directory).mkdir(parents=True, exist_ok=True)
+        torch.save(self.actor.state_dict(), "%s/%s_actor.pth" % (directory, filename))
+        torch.save(
+            self.actor_target.state_dict(),
+            "%s/%s_actor_target.pth" % (directory, filename),
+        )
+        torch.save(self.critic.state_dict(), "%s/%s_critic.pth" % (directory, filename))
+        torch.save(
+            self.critic_target.state_dict(),
+            "%s/%s_critic_target.pth" % (directory, filename),
+        )
+
+    def load(self, filename, directory):
+        """
+        Loads model parameters from the specified directory.
+
+        Args:
+            filename (str): Base filename for saved files.
+            directory (Path): Path to load the model files from.
+        """
+        self.actor.load_state_dict(
+            torch.load("%s/%s_actor.pth" % (directory, filename))
+        )
+        self.actor_target.load_state_dict(
+            torch.load("%s/%s_actor_target.pth" % (directory, filename))
+        )
+        self.critic.load_state_dict(
+            torch.load("%s/%s_critic.pth" % (directory, filename))
+        )
+        self.critic_target.load_state_dict(
+            torch.load("%s/%s_critic_target.pth" % (directory, filename))
+        )
+        print(f"Loaded weights from: {directory}")
+
+    def prepare_state(
+        self, poses, distance, cos, sin, collision, action, goal_positions
+    ):
+        """
+        Prepares the environment's raw agent state for learning.
+
+        Args:
+            poses (list): Each agent's global pose [x, y, theta].
+            distance (list): Each agent's distance to its goal.
+            cos (list): Cosine of each agent's heading-to-goal angle.
+            sin (list): Sine of each agent's heading-to-goal angle.
+            collision (list): Collision flags per agent.
+            action (list): Last action taken per agent [lin_vel, ang_vel].
+            goal_positions (list): Each agent's goal [x, y].
+
+        Returns:
+            states (list): List of processed state vectors.
+            terminal (list): Flags (1 if the agent collided, else 0).
+        """
+        states = []
+        terminal = []
+
+        for i in range(self.num_robots):
+            pose = poses[i]  # [x, y, theta]
+            goal_pos = goal_positions[i]  # [goal_x, goal_y]
+            act = action[i]  # [lin_vel, ang_vel]
+
+            px, py, theta = pose
+            gx, gy = goal_pos
+
+            # Heading as cos/sin
+            heading_cos = np.cos(theta)
+            heading_sin = np.sin(theta)
+
+            # Last velocity
+            lin_vel = act[0] * 2  # Assuming original range [-0.5, 0.5]
+            ang_vel = (act[1] + 1) / 2  # Assuming original range [-1, 1]
+
+            # Final state vector
+            state = [
+                px,
+                py,
+                heading_cos,
+                heading_sin,
+                distance[i] / 17,
+                cos[i],
+                sin[i],
+                lin_vel,
+                ang_vel,
+                gx,
+                gy,
+            ]
+
+            assert (
+                len(state) == self.state_dim
+            ), f"State length mismatch: expected {self.state_dim}, got {len(state)}"
+            states.append(state)
+            terminal.append(collision[i])
+
+        return states, terminal
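# --- Aside: get_action() above shapes exploration noise per channel: odd action
# --- indices (the angular velocities in the flattened action vector) receive a
# --- quarter of the linear-velocity noise before clipping. The same idea with
# --- an explicit slice; the std of 0.5 mirrors the code, the rest is illustrative.
import numpy as np

def shaped_noise(action, std=0.5):
    noise = np.random.normal(0.0, std, size=action.shape)
    noise[1::2] /= 4.0  # damp angular-velocity exploration
    return np.clip(action + noise, -1.0, 1.0)

flat_actions = np.zeros(6)  # 3 robots x (linear, angular)
print(shaped_noise(flat_actions))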
diff --git a/robot_nav/multi_robot_world.yaml b/robot_nav/multi_robot_world.yaml
index b62285f..2a822f8 100644
--- a/robot_nav/multi_robot_world.yaml
+++ b/robot_nav/multi_robot_world.yaml
@@ -6,7 +6,7 @@ world:
   collision_mode: 'reactive'
 
 robot:
-  - number: 3
+  - number: 5
     kinematics: {name: 'diff'}
     distribution: {name: 'circle', radius: 4, center: [6, 6]}
     shape: {name: 'circle', radius: 0.2}
@@ -14,45 +14,14 @@ robot:
     vel_max: [ 1.0, 1.0 ]
     state: [2, 2, 0, 0]
     goal: [9, 9, 0]
-    color: ['royalblue', 'red', 'green', 'orange', 'purple']
+    color: ['royalblue', 'red', 'green', 'orange', 'purple', 'yellow', 'cyan', 'magenta', 'lime', 'pink', 'brown']
     arrive_mode: position
     goal_threshold: 0.3
-    sensors:
-      - type: 'lidar2d'
-        range_min: 0
-        range_max: 7
-        angle_range: 3.14
-        number: 90
-        noise: True
-        std: 0.08
-        angle_std: 0.1
-        offset: [ 0, 0, 0 ]
-        alpha: 0.3
 
     plot:
       show_trajectory: False
-      show_sensor: False
 
-obstacle:
-  - number: 4
-    kinematics: {name: 'static'}
-    distribution: {name: 'random', range_low: [0, 0, -3.14], range_high: [12, 12, 3.14]}
-    behavior: {name: 'rvo', wander: True, range_low: [0, 0, -3.14], range_high: [12, 12, 3.14], vxmax: 0.2, vymax: 0.2, factor: 1.0}
-    vel_max: [0.2, 0.2]
-    vel_min: [-0.2, -0.2]
-    shape:
-      - {name: 'circle', radius: 1.0, random_shape: True}
-      - {name: 'polygon', random_shape: true, avg_radius_range: [0.5, 1.0], irregularity_range: [0, 0.4], spikeyness_range: [0, 0.4], num_vertices_range: [4, 6]}
-  - shape: {name: 'circle', radius: 0.8}
-    state: [ 5, 10, 1 ]
-    kinematics: { name: 'static' }
-  - shape: { name: 'rectangle', length: 1.0, width: 1.2 }  # length, width
-    state: [ 8, 5, 1 ]
-    kinematics: {name: 'static'}
-  - shape: { name: 'rectangle', length: 0.5, width: 2.1 }  # length, width
-    state: [ 1, 8, 1.3 ]
-    kinematics: {name: 'static'}
-  - shape: { name: 'linestring', vertices: [ [ 0, 0 ], [ 12, 0 ], [ 12, 12 ], [ 0, 12 ],[ 0, 0 ] ] }  # vertices
-    kinematics: {name: 'static'}
-    state: [ 0, 0, 0 ]
+#obstacle:
+#  - shape: { name: 'linestring', vertices: [ [ 0, 0 ], [ 12, 0 ], [ 12, 12 ], [ 0, 12 ],[ 0, 0 ] ] }  # vertices
+#    kinematics: {name: 'static'}
+#    state: [ 0, 0, 0 ]
diff --git a/robot_nav/multi_robot_world2.yaml b/robot_nav/multi_robot_world2.yaml
deleted file mode 100644
index 2a822f8..0000000
--- a/robot_nav/multi_robot_world2.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-world:
-  height: 12 # the height of the world
-  width: 12 # the
height of the world - step_time: 0.3 # 10Hz calculate each step - sample_time: 0.3 # 10 Hz for render and data extraction - collision_mode: 'reactive' - -robot: - - number: 5 - kinematics: {name: 'diff'} - distribution: {name: 'circle', radius: 4, center: [6, 6]} - shape: {name: 'circle', radius: 0.2} - vel_min: [ 0, -1.0 ] - vel_max: [ 1.0, 1.0 ] - state: [2, 2, 0, 0] - goal: [9, 9, 0] - color: ['royalblue', 'red', 'green', 'orange', 'purple', 'yellow', 'cyan', 'magenta', 'lime', 'pink', 'brown'] - arrive_mode: position - goal_threshold: 0.3 - - plot: - show_trajectory: False - -#obstacle: -# - shape: { name: 'linestring', vertices: [ [ 0, 0 ], [ 12, 0 ], [ 12, 12 ], [ 0, 12 ],[ 0, 0 ] ] } # vertices -# kinematics: {name: 'static'} -# state: [ 0, 0, 0 ] diff --git a/robot_nav/multi_train.py b/robot_nav/multi_train.py deleted file mode 100644 index 97d34e1..0000000 --- a/robot_nav/multi_train.py +++ /dev/null @@ -1,145 +0,0 @@ -from robot_nav.models.TD3.TD3 import TD3 -from robot_nav.models.DDPG.DDPG import DDPG -from robot_nav.models.SAC.SAC import SAC -from robot_nav.models.HCM.hardcoded_model import HCM -from robot_nav.models.PPO.PPO import PPO -from robot_nav.models.CNNTD3.CNNTD3 import CNNTD3 - -import torch -import numpy as np -from sim import SIM_ENV -from utils import get_buffer - - -def main(args=None): - """Main training function""" - action_dim = 2 # number of actions produced by the model - max_action = 1 # maximum absolute value of output actions - state_dim = 97 # number of input values in the neural network (vector length of state input) - device = torch.device( - "cuda" if torch.cuda.is_available() else "cpu" - ) # using cuda if it is available, cpu otherwise - nr_eval_episodes = 10 # how many episodes to use to run evaluation - max_epochs = 60 # max number of epochs - epoch = 0 # starting epoch number - episodes_per_epoch = 70 # how many episodes to run in single epoch - episode = 0 # starting episode number - train_every_n = 2 # train and update network parameters every n episodes - training_iterations = 80 # how many batches to use for single training cycle - batch_size = 12 # batch size for each training iteration - max_steps = 300 # maximum number of steps in single episode - steps = 0 # starting step number - load_saved_buffer = False # whether to load experiences from assets/data.yml - pretrain = False # whether to use the loaded experiences to pre-train the model (load_saved_buffer must be True) - pretraining_iterations = ( - 10 # number of training iterations to run during pre-training - ) - save_every = 5 # save the model every n training cycles - - - - sim = SIM_ENV(world_file="multi_robot_world.yaml",disable_plotting=False) # instantiate environment - - model = CNNTD3( - state_dim=state_dim, - action_dim=action_dim, - max_action=max_action, - num_robots=sim.num_robots, - device=device, - save_every=save_every, - load_model=False, - model_name="CNNTD3", - ) # instantiate a model - - replay_buffer = get_buffer( - model, - sim, - load_saved_buffer, - pretrain, - pretraining_iterations, - training_iterations, - batch_size, - ) - con = torch.tensor([[[0,0], [0,0], [0,0]]]) - - latest_scan, distance, cos, sin, collision, goal, a, reward, positions = sim.step([[0, 0] for _ in range(sim.num_robots)], con) # get the initial step state - - while epoch < max_epochs: # train until max_epochs is reached - state, terminal = model.prepare_state( - latest_scan, distance, cos, sin, collision, goal, a, positions - ) # get state a state representation from returned data from the 
environment - - action, connection = model.get_action(np.array(state), False) # get an action from the model - - a_in = [[(a[0] + 1) / 4, a[1]] for a in action] # clip linear velocity to [0, 0.5] m/s range - - latest_scan, distance, cos, sin, collision, goal, a, reward, positions = sim.step(a_in, connection) # get data from the environment - next_state, terminal = model.prepare_state( - latest_scan, distance, cos, sin, collision, goal, a, positions - ) # get a next state representation - replay_buffer.add( - state, action, reward, terminal, next_state - ) # add experience to the replay buffer - - if ( - any(terminal) or steps == max_steps - ): # reset environment of terminal stat ereached, or max_steps were taken - latest_scan, distance, cos, sin, collision, goal, a, reward, positions = sim.reset() - episode += 1 - if episode % train_every_n == 0: - model.train( - replay_buffer=replay_buffer, - iterations=training_iterations, - batch_size=batch_size, - ) # train the model and update its parameters - - steps = 0 - else: - steps += 1 - - if ( - episode + 1 - ) % episodes_per_epoch == 0: # if epoch is concluded, run evaluation - episode = 0 - epoch += 1 - evaluate(model, epoch, sim, eval_episodes=nr_eval_episodes) - - -def evaluate(model, epoch, sim, eval_episodes=10): - print("..............................................") - print(f"Epoch {epoch}. Evaluating scenarios") - avg_reward = 0.0 - col = 0 - goals = 0 - for _ in range(eval_episodes): - count = 0 - latest_scan, distance, cos, sin, collision, goal, a, reward, positions = sim.reset() - done = False - while not done and count < 501: - state, terminal = model.prepare_state( - latest_scan, distance, cos, sin, collision, goal, a, positions - ) - action, connection = model.get_action(np.array(state), False) - a_in = [[(a[0] + 1) / 4, a[1]] for a in action] - latest_scan, distance, cos, sin, collision, goal, a, reward, positions = sim.step(a_in, connection) - avg_reward += sum(reward)/len(reward) - count += 1 - if collision: - col += 1 - if goal: - goals += 1 - done = collision or goal - avg_reward /= eval_episodes - avg_col = col / eval_episodes - avg_goal = goals / eval_episodes - print(f"Average Reward: {avg_reward}") - print(f"Average Collision rate: {avg_col}") - print(f"Average Goal rate: {avg_goal}") - print("..............................................") - model.writer.add_scalar("eval/avg_reward", avg_reward, epoch) - model.writer.add_scalar("eval/avg_col", avg_col, epoch) - model.writer.add_scalar("eval/avg_goal", avg_goal, epoch) - - -if __name__ == "__main__": - main() diff --git a/robot_nav/multi_train2.py b/robot_nav/multi_train2.py deleted file mode 100644 index 0e83c4f..0000000 --- a/robot_nav/multi_train2.py +++ /dev/null @@ -1,167 +0,0 @@ -from robot_nav.models.TD3.TD3 import TD3 -from robot_nav.models.DDPG.DDPG import DDPG -from robot_nav.models.SAC.SAC import SAC -from robot_nav.models.HCM.hardcoded_model import HCM -from robot_nav.models.PPO.PPO import PPO -from robot_nav.models.CNNTD3.att import CNNTD3 - -import torch -import numpy as np -from sim2 import SIM_ENV -from utils import get_buffer - -def outside_of_bounds(poses): - outside = False - for pose in poses: - norm_x = pose[0] - 6 - norm_y = pose[1] - 6 - if abs(norm_x) > 10.5 or abs(norm_y) > 10.5: - outside = True - break - return outside - -def main(args=None): - """Main training function""" - action_dim = 2 # number of actions produced by the model - max_action = 1 # maximum absolute value of output actions - state_dim = 11 # number of input values in the 
neural network (vector length of state input) - device = torch.device( - "cuda" if torch.cuda.is_available() else "cpu" - ) # using cuda if it is available, cpu otherwise - nr_eval_episodes = 10 # how many episodes to use to run evaluation - max_epochs = 100 # max number of epochs - epoch = 0 # starting epoch number - episodes_per_epoch = 70 # how many episodes to run in single epoch - episode = 0 # starting episode number - train_every_n = 10 # train and update network parameters every n episodes - training_iterations = 80 # how many batches to use for single training cycle - batch_size = 16 # batch size for each training iteration - max_steps = 300 # maximum number of steps in single episode - steps = 0 # starting step number - load_saved_buffer = False # whether to load experiences from assets/data.yml - pretrain = False # whether to use the loaded experiences to pre-train the model (load_saved_buffer must be True) - pretraining_iterations = ( - 10 # number of training iterations to run during pre-training - ) - save_every = 5 # save the model every n training cycles - - - - sim = SIM_ENV(world_file="multi_robot_world2.yaml",disable_plotting=False) # instantiate environment - - model = CNNTD3( - state_dim=state_dim, - action_dim=action_dim, - max_action=max_action, - num_robots=sim.num_robots, - device=device, - save_every=save_every, - load_model=False, - model_name="phase1", - load_model_name="phase1" - ) # instantiate a model - - replay_buffer = get_buffer( - model, - sim, - load_saved_buffer, - pretrain, - pretraining_iterations, - training_iterations, - batch_size, - ) - con = torch.tensor([[0. for _ in range(sim.num_robots-1)] for _ in range(sim.num_robots) ]) - - poses, distance, cos, sin, collision, goal, a, reward, positions, goal_positions = sim.step([[0, 0] for _ in range(sim.num_robots)], con) # get the initial step state - running_goals = 0 - running_collisions = 0 - running_timesteps = 0 - iter = 1 - while epoch < max_epochs: # train until max_epochs is reached - state, terminal = model.prepare_state( - poses, distance, cos, sin, collision, goal, a, positions, goal_positions - ) # get state a state representation from returned data from the environment - - action, connection, combined_weights = model.get_action(np.array(state), True) # get an action from the model - - a_in = [[(a[0] + 1) / 4, a[1]] for a in action] # clip linear velocity to [0, 0.5] m/s range - - poses, distance, cos, sin, collision, goal, a, reward, positions, goal_positions = sim.step(a_in, connection, combined_weights) # get data from the environment - running_goals += sum(goal) - running_collisions += sum(collision) - running_timesteps += 1 - next_state, terminal = model.prepare_state( - poses, distance, cos, sin, collision, goal, a, positions, goal_positions - ) # get a next state representation - replay_buffer.add( - state, action, reward, terminal, next_state - ) # add experience to the replay buffer - outside = outside_of_bounds(poses) - if ( - any(terminal) or steps == max_steps or outside - ): # reset environment of terminal stat ereached, or max_steps were taken - poses, distance, cos, sin, collision, goal, a, reward, positions, goal_positions = sim.reset() - episode += 1 - if episode % train_every_n == 0: - model.writer.add_scalar("run/avg_goal", running_goals/running_timesteps, iter) - model.writer.add_scalar("run/avg_collision", running_collisions / running_timesteps, iter) - running_goals = 0 - running_collisions = 0 - running_timesteps = 0 - iter += 1 - model.train( - 
replay_buffer=replay_buffer, - iterations=training_iterations, - batch_size=batch_size, - ) # train the model and update its parameters - - steps = 0 - else: - steps += 1 - - # if ( - # episode + 1 - # ) % episodes_per_epoch == 0: # if epoch is concluded, run evaluation - # episode = 0 - # epoch += 1 - # # evaluate(model, epoch, sim, eval_episodes=nr_eval_episodes) - - -def evaluate(model, epoch, sim, eval_episodes=10): - print("..............................................") - print(f"Epoch {epoch}. Evaluating scenarios") - avg_reward = 0.0 - col = 0 - goals = 0 - for _ in range(eval_episodes): - count = 0 - poses, distance, cos, sin, collision, goal, a, reward, positions, goal_positions = sim.reset() - done = False - while not done and count < 501: - state, terminal = model.prepare_state( - poses, distance, cos, sin, collision, goal, a, positions, goal_positions - ) - action, connection, combined_weights = model.get_action(np.array(state), False) - a_in = [[(a[0] + 1) / 4, a[1]] for a in action] - poses, distance, cos, sin, collision, goal, a, reward, positions, goal_positions = sim.step(a_in, connection, combined_weights) - avg_reward += sum(reward)/len(reward) - count += 1 - if collision: - col += 1 - if goal: - goals += 1 - done = collision or goal - avg_reward /= eval_episodes - avg_col = col / eval_episodes - avg_goal = goals / eval_episodes - print(f"Average Reward: {avg_reward}") - print(f"Average Collision rate: {avg_col}") - print(f"Average Goal rate: {avg_goal}") - print("..............................................") - model.writer.add_scalar("eval/avg_reward", avg_reward, epoch) - model.writer.add_scalar("eval/avg_col", avg_col, epoch) - model.writer.add_scalar("eval/avg_goal", avg_goal, epoch) - - -if __name__ == "__main__": - main() diff --git a/robot_nav/sim.py b/robot_nav/sim.py deleted file mode 100644 index 1fbc26f..0000000 --- a/robot_nav/sim.py +++ /dev/null @@ -1,209 +0,0 @@ -import irsim -import numpy as np -import random - -import torch - - -class SIM_ENV: - """ - A simulation environment interface for robot navigation using IRSim. - - This class wraps around the IRSim environment and provides methods for stepping, - resetting, and interacting with a mobile robot, including reward computation. - - Attributes: - env (object): The simulation environment instance from IRSim. - robot_goal (np.ndarray): The goal position of the robot. - """ - - def __init__(self, world_file="robot_world.yaml", disable_plotting=False): - """ - Initialize the simulation environment. - - Args: - world_file (str): Path to the world configuration YAML file. - disable_plotting (bool): If True, disables rendering and plotting. - """ - display = False if disable_plotting else True - self.env = irsim.make( - world_file, disable_all_plot=disable_plotting, display=display - ) - robot_info = self.env.get_robot_info(0) - self.robot_goal = robot_info.goal - self.num_robots = len(self.env.robot_list) - - def step(self, action, connection): - """ - Perform one step in the simulation using the given control commands. - - Args: - lin_velocity (float): Linear velocity to apply to the robot. - ang_velocity (float): Angular velocity to apply to the robot. - - Returns: - (tuple): Contains the latest LIDAR scan, distance to goal, cosine and sine of angle to goal, - collision flag, goal reached flag, applied action, and computed reward. 
- """ - # action = [[lin_velocity, ang_velocity], [lin_velocity, ang_velocity], [lin_velocity, ang_velocity], [lin_velocity, ang_velocity], [lin_velocity, ang_velocity]] - self.env.step(action_id=[i for i in range(self.num_robots)], action=action) - self.env.render() - - lasers = [] - distances = [] - coss = [] - sins = [] - collisions = [] - goals = [] - rewards = [] - positions = [] - for i in range(self.num_robots): - - - - - - - scan = self.env.get_lidar_scan(id=i) - latest_scan = scan["ranges"] - robot_state = self.env.robot_list[i].state - robot_goal = self.env.robot_list[i].goal - goal_vector = [ - robot_goal[0].item() - robot_state[0].item(), - robot_goal[1].item() - robot_state[1].item(), - ] - distance = np.linalg.norm(goal_vector) - goal = self.env.robot_list[i].arrive - pose_vector = [np.cos(robot_state[2]).item(), np.sin(robot_state[2]).item()] - cos, sin = self.cossin(pose_vector, goal_vector) - collision = self.env.robot_list[i].collision - action_i = action[i] - reward = self.get_reward(goal, collision, action_i, latest_scan) - - position = [robot_state[0].item(), robot_state[1].item()] - - lasers.append(latest_scan) - distances.append(distance) - coss.append(cos) - sins.append(sin) - collisions.append(collision) - goals.append(goal) - rewards.append(reward) - positions.append(position) - - i_connections = connection[0][i].tolist() - i_connections.insert(i, 0) - - for j in range(self.num_robots): - if i_connections[j] > 0.5: - other_robot_state = self.env.robot_list[j].state - other_pos = [other_robot_state[0].item(), other_robot_state[1].item()] - rx = [position[0], other_pos[0]] - ry = [position[1], other_pos[1]] - self.env.draw_trajectory(np.array([rx, ry]), refresh=True) - - if goal: - self.env.robot_list[i].set_random_goal( - obstacle_list=self.env.obstacle_list, - init=True, - range_limits=[[1, 1, -3.141592653589793], [11, 11, 3.141592653589793]], - ) - - return lasers, distances, coss, sins, collisions, goals, action, rewards, positions - - def reset( - self, - robot_state=None, - robot_goal=None, - random_obstacles=True, - random_obstacle_ids=None, - ): - """ - Reset the simulation environment, optionally setting robot and obstacle states. - - Args: - robot_state (list or None): Initial state of the robot as a list of [x, y, theta, speed]. - robot_goal (list or None): Goal state for the robot. - random_obstacles (bool): Whether to randomly reposition obstacles. - random_obstacle_ids (list or None): Specific obstacle IDs to randomize. - - Returns: - (tuple): Initial observation after reset, including LIDAR scan, distance, cos/sin, - and reward-related flags and values. 
- """ - if robot_state is None: - robot_state = [[random.uniform(1, 11)], [random.uniform(1, 11)], [0]] - - for robot in self.env.robot_list: - robot_state = [[random.uniform(1, 11)], [random.uniform(1, 11)], [0]] - robot.set_state( - state=np.array(robot_state), - init=True, - ) - - if random_obstacles: - if random_obstacle_ids is None: - random_obstacle_ids = [i + self.num_robots for i in range(7)] - self.env.random_obstacle_position( - range_low=[0, 0, -3.14], - range_high=[12, 12, 3.14], - ids=random_obstacle_ids, - non_overlapping=True, - ) - - for robot in self.env.robot_list: - if robot_goal is None: - robot.set_random_goal( - obstacle_list=self.env.obstacle_list, - init=True, - range_limits=[[1, 1, -3.141592653589793], [11, 11, 3.141592653589793]], - ) - else: - self.env.robot.set_goal(np.array(robot_goal), init=True) - self.env.reset() - self.robot_goal = self.env.robot.goal - - action = [[0.0, 0.0] for _ in range(self.num_robots)] - con = torch.tensor([[[0, 0], [0, 0], [0, 0]]]) - latest_scan, distance, cos, sin, _, _, action, reward, positions = self.step(action, con) - return latest_scan, distance, cos, sin, [False]*self.num_robots, [False]*self.num_robots, action, reward, positions - - @staticmethod - def cossin(vec1, vec2): - """ - Compute the cosine and sine of the angle between two 2D vectors. - - Args: - vec1 (list): First 2D vector. - vec2 (list): Second 2D vector. - - Returns: - (tuple): (cosine, sine) of the angle between the vectors. - """ - vec1 = vec1 / np.linalg.norm(vec1) - vec2 = vec2 / np.linalg.norm(vec2) - cos = np.dot(vec1, vec2) - sin = vec1[0] * vec2[1] - vec1[1] * vec2[0] - return cos, sin - - @staticmethod - def get_reward(goal, collision, action, laser_scan): - """ - Calculate the reward for the current step. - - Args: - goal (bool): Whether the goal has been reached. - collision (bool): Whether a collision occurred. - action (list): The action taken [linear velocity, angular velocity]. - laser_scan (list): The LIDAR scan readings. - - Returns: - (float): Computed reward for the current state. 
- """ - if goal: - return 100.0 - elif collision: - return -100.0 - else: - r3 = lambda x: 1.35 - x if x < 1.35 else 0.0 - return action[0] - abs(action[1]) / 2 - r3(min(laser_scan)) / 2 diff --git a/robot_nav/test.py b/robot_nav/test.py index fe59051..087f570 100644 --- a/robot_nav/test.py +++ b/robot_nav/test.py @@ -1,13 +1,8 @@ from models.TD3.TD3 import TD3 -from models.DDPG.DDPG import DDPG -from models.SAC.SAC import SAC -from models.HCM.hardcoded_model import HCM -from models.PPO.PPO import PPO -from robot_nav.models.CNNTD3.CNNTD3 import CNNTD3 import torch import numpy as np -from sim import SIM_ENV +from robot_nav.SIM_ENV.sim import SIM import yaml @@ -31,7 +26,7 @@ def main(args=None): model_name="TD3", ) # instantiate a model - sim = SIM_ENV(world_file="eval_world.yaml") # instantiate environment + sim = SIM(world_file="eval_world.yaml") # instantiate environment with open("robot_nav/eval_points.yaml") as file: points = yaml.safe_load(file) robot_poses = points["robot"]["poses"] diff --git a/robot_nav/test_random.py b/robot_nav/test_random.py index 90446a2..63690ec 100644 --- a/robot_nav/test_random.py +++ b/robot_nav/test_random.py @@ -1,16 +1,11 @@ -from robot_nav.models.TD3.TD3 import TD3 -from robot_nav.models.DDPG.DDPG import DDPG from robot_nav.models.SAC.SAC import SAC -from robot_nav.models.HCM.hardcoded_model import HCM -from robot_nav.models.PPO.PPO import PPO -from robot_nav.models.CNNTD3.CNNTD3 import CNNTD3 import statistics import numpy as np import tqdm import matplotlib.pyplot as plt import torch -from sim import SIM_ENV +from robot_nav.SIM_ENV.sim import SIM def main(args=None): @@ -34,7 +29,7 @@ def main(args=None): model_name="SAC", ) # instantiate a model - sim = SIM_ENV( + sim = SIM( world_file="eval_world.yaml", disable_plotting=False ) # instantiate environment diff --git a/robot_nav/test_rnn.py b/robot_nav/test_rnn.py index 526ee34..9fca0b9 100644 --- a/robot_nav/test_rnn.py +++ b/robot_nav/test_rnn.py @@ -4,7 +4,7 @@ import torch import numpy as np -from sim import SIM_ENV +from robot_nav.SIM_ENV.sim import SIM import yaml @@ -27,7 +27,7 @@ def main(args=None): load_model=True, ) # instantiate a model - sim = SIM_ENV(world_file="eval_world.yaml") # instantiate environment + sim = SIM(world_file="eval_world.yaml") # instantiate environment with open("robot_nav/eval_points.yaml") as file: points = yaml.safe_load(file) robot_poses = points["robot"]["poses"] diff --git a/robot_nav/train.py b/robot_nav/train.py index 403cdb0..6d97362 100644 --- a/robot_nav/train.py +++ b/robot_nav/train.py @@ -1,13 +1,8 @@ -from robot_nav.models.TD3.TD3 import TD3 -from robot_nav.models.DDPG.DDPG import DDPG -from robot_nav.models.SAC.SAC import SAC -from robot_nav.models.HCM.hardcoded_model import HCM -from robot_nav.models.PPO.PPO import PPO from robot_nav.models.CNNTD3.CNNTD3 import CNNTD3 import torch import numpy as np -from sim import SIM_ENV +from robot_nav.SIM_ENV.sim import SIM from utils import get_buffer @@ -46,7 +41,9 @@ def main(args=None): model_name="CNNTD3", ) # instantiate a model - sim = SIM_ENV(world_file="multi_robot_world.yaml",disable_plotting=False) # instantiate environment + sim = SIM( + world_file="robot_world.yaml", disable_plotting=False + ) # instantiate environment replay_buffer = get_buffer( model, sim, diff --git a/robot_nav/train_marl.py b/robot_nav/train_marl.py new file mode 100644 index 0000000..52e4dfa --- /dev/null +++ b/robot_nav/train_marl.py @@ -0,0 +1,154 @@ +from robot_nav.models.MARL.marlTD3 import TD3 + +import torch +import numpy 
as np
+from robot_nav.SIM_ENV.marl_sim import MARL_SIM
+from utils import get_buffer
+
+
+def outside_of_bounds(poses):
+    outside = False
+    for pose in poses:
+        norm_x = pose[0] - 6
+        norm_y = pose[1] - 6
+        if abs(norm_x) > 10.5 or abs(norm_y) > 10.5:
+            outside = True
+            break
+    return outside
+
+
+def main(args=None):
+    """Main training function"""
+    action_dim = 2  # number of actions produced by the model
+    max_action = 1  # maximum absolute value of output actions
+    state_dim = 11  # number of input values in the neural network (vector length of state input)
+    device = torch.device(
+        "cuda" if torch.cuda.is_available() else "cpu"
+    )  # use cuda if it is available, cpu otherwise
+    max_epochs = 600  # max number of epochs
+    epoch = 1  # starting epoch number
+    episode = 0  # starting episode number
+    train_every_n = 10  # train and update network parameters every n episodes
+    training_iterations = 80  # how many batches to use for a single training cycle
+    batch_size = 16  # batch size for each training iteration
+    max_steps = 300  # maximum number of steps in a single episode
+    steps = 0  # starting step number
+    load_saved_buffer = False  # whether to load experiences from assets/data.yml
+    pretrain = False  # whether to use the loaded experiences to pre-train the model (load_saved_buffer must be True)
+    pretraining_iterations = (
+        10  # number of training iterations to run during pre-training
+    )
+    save_every = 5  # save the model every n training cycles
+
+    sim = MARL_SIM(
+        world_file="multi_robot_world.yaml", disable_plotting=False
+    )  # instantiate environment
+
+    model = TD3(
+        state_dim=state_dim,
+        action_dim=action_dim,
+        max_action=max_action,
+        num_robots=sim.num_robots,
+        device=device,
+        save_every=save_every,
+        load_model=False,
+        model_name="phase1",
+        load_model_name="phase1",
+    )  # instantiate a model
+
+    replay_buffer = get_buffer(
+        model,
+        sim,
+        load_saved_buffer,
+        pretrain,
+        pretraining_iterations,
+        training_iterations,
+        batch_size,
+    )
+    connections = torch.tensor(
+        [[0.0 for _ in range(sim.num_robots - 1)] for _ in range(sim.num_robots)]
+    )
+
+    poses, distance, cos, sin, collision, goal, a, reward, positions, goal_positions = (
+        sim.step([[0, 0] for _ in range(sim.num_robots)], connections)
+    )  # get the initial step state
+    running_goals = 0
+    running_collisions = 0
+    running_timesteps = 0
+    while epoch < max_epochs:  # train until max_epochs is reached
+        state, terminal = model.prepare_state(
+            poses, distance, cos, sin, collision, a, goal_positions
+        )  # get a state representation from the data returned by the environment
+
+        action, connection, combined_weights = model.get_action(
+            np.array(state), True
+        )  # get an action from the model
+
+        a_in = [
+            [(a[0] + 1) / 4, a[1]] for a in action
+        ]  # map linear velocity into the [0, 0.5] m/s range
+
+        (
+            poses,
+            distance,
+            cos,
+            sin,
+            collision,
+            goal,
+            a,
+            reward,
+            positions,
+            goal_positions,
+        ) = sim.step(
+            a_in, connection, combined_weights
+        )  # get data from the environment
+        running_goals += sum(goal)
+        running_collisions += sum(collision)
+        running_timesteps += 1
+        next_state, terminal = model.prepare_state(
+            poses, distance, cos, sin, collision, a, goal_positions
+        )  # get a next-state representation
+        replay_buffer.add(
+            state, action, reward, terminal, next_state
+        )  # add the experience to the replay buffer
+        outside = outside_of_bounds(poses)
+        if (
+            any(terminal) or steps == max_steps or outside
+        ):  # reset the environment if a terminal state was reached or max_steps were taken
+            (
+                poses,
+                distance,
+
cos, + sin, + collision, + goal, + a, + reward, + positions, + goal_positions, + ) = sim.reset() + episode += 1 + if episode % train_every_n == 0: + model.writer.add_scalar( + "run/avg_goal", running_goals / running_timesteps, epoch + ) + model.writer.add_scalar( + "run/avg_collision", running_collisions / running_timesteps, epoch + ) + running_goals = 0 + running_collisions = 0 + running_timesteps = 0 + epoch += 1 + model.train( + replay_buffer=replay_buffer, + iterations=training_iterations, + batch_size=batch_size, + ) # train the model and update its parameters + + steps = 0 + else: + steps += 1 + + +if __name__ == "__main__": + main() diff --git a/robot_nav/train_rnn.py b/robot_nav/train_rnn.py index aec1f70..5d1ad4e 100644 --- a/robot_nav/train_rnn.py +++ b/robot_nav/train_rnn.py @@ -5,7 +5,7 @@ import numpy as np -from sim import SIM_ENV +from robot_nav.SIM_ENV.sim import SIM from utils import get_buffer @@ -47,7 +47,7 @@ def main(args=None): rnn="gru", ) # instantiate a model - sim = SIM_ENV() # instantiate environment + sim = SIM() # instantiate environment replay_buffer = get_buffer( model, sim, diff --git a/tests/test_model.py b/tests/test_model.py index 13193ac..7b3c354 100644 --- a/tests/test_model.py +++ b/tests/test_model.py @@ -6,11 +6,12 @@ from robot_nav.models.SAC.SAC import SAC from robot_nav.models.DDPG.DDPG import DDPG from robot_nav.utils import get_buffer -from robot_nav.sim import SIM_ENV +from robot_nav.SIM_ENV.sim import SIM import pytest PROJECT_ROOT = Path(__file__).resolve().parents[1] + @pytest.mark.parametrize( "model, state_dim", [ @@ -31,7 +32,7 @@ def test_models(model, state_dim): load_model=False, ) # instantiate a model - sim = SIM_ENV + sim = SIM prefilled_buffer = get_buffer( model=test_model, @@ -71,7 +72,7 @@ def test_max_bound_models(model, state_dim): use_max_bound=True, ) # instantiate a model - sim = SIM_ENV + sim = SIM prefilled_buffer = get_buffer( model=test_model, diff --git a/tests/test_sim.py b/tests/test_sim.py index adb9e01..5cf3edb 100644 --- a/tests/test_sim.py +++ b/tests/test_sim.py @@ -2,17 +2,17 @@ import pytest -from robot_nav.sim import SIM_ENV +from robot_nav.SIM_ENV.sim import SIM import numpy as np skip_on_ci = pytest.mark.skipif( - os.getenv("CI") == "true", - reason="Skipped on CI (GitHub Actions)" + os.getenv("CI") == "true", reason="Skipped on CI (GitHub Actions)" ) + @skip_on_ci def test_sim(): - sim = SIM_ENV("/tests/test_world.yaml") + sim = SIM("/tests/test_world.yaml") robot_state = sim.env.get_robot_state() state = sim.step(1, 0) next_robot_state = sim.env.get_robot_state() @@ -27,9 +27,10 @@ def test_sim(): assert np.not_equal(robot_state[0], new_robot_state[0]) assert np.not_equal(robot_state[1], new_robot_state[1]) + @skip_on_ci def test_sincos(): - sim = SIM_ENV("/tests/test_world.yaml") + sim = SIM("/tests/test_world.yaml") cos, sin = sim.cossin([1, 0], [0, 1]) assert np.isclose(cos, 0) assert np.isclose(sin, 1) diff --git a/tests/test_utils.py b/tests/test_utils.py index 7aa18ef..2414152 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -4,10 +4,11 @@ from robot_nav.models.PPO.PPO import PPO, RolloutBuffer from robot_nav.models.RCPG.RCPG import RCPG from robot_nav.utils import get_buffer, RolloutReplayBuffer, ReplayBuffer -from robot_nav.sim import SIM_ENV +from robot_nav.SIM_ENV.sim import SIM PROJECT_ROOT = Path(__file__).resolve().parents[1] + def test_buffer(): model = SAC( state_dim=10, @@ -18,7 +19,7 @@ def test_buffer(): load_model=False, ) # instantiate a model - sim = SIM_ENV + sim = SIM 
diff --git a/robot_nav/train_rnn.py b/robot_nav/train_rnn.py
index aec1f70..5d1ad4e 100644
--- a/robot_nav/train_rnn.py
+++ b/robot_nav/train_rnn.py
@@ -5,7 +5,7 @@
 import numpy as np
 
-from sim import SIM_ENV
+from robot_nav.SIM_ENV.sim import SIM
 from utils import get_buffer
 
 
@@ -47,7 +47,7 @@ def main(args=None):
         rnn="gru",
     )  # instantiate a model
 
-    sim = SIM_ENV()  # instantiate environment
+    sim = SIM()  # instantiate environment
     replay_buffer = get_buffer(
         model,
         sim,
diff --git a/tests/test_model.py b/tests/test_model.py
index 13193ac..7b3c354 100644
--- a/tests/test_model.py
+++ b/tests/test_model.py
@@ -6,11 +6,12 @@ from robot_nav.models.SAC.SAC import SAC
 from robot_nav.models.DDPG.DDPG import DDPG
 from robot_nav.utils import get_buffer
-from robot_nav.sim import SIM_ENV
+from robot_nav.SIM_ENV.sim import SIM
 import pytest
 
 PROJECT_ROOT = Path(__file__).resolve().parents[1]
 
+
 @pytest.mark.parametrize(
     "model, state_dim",
     [
@@ -31,7 +32,7 @@ def test_models(model, state_dim):
         load_model=False,
     )  # instantiate a model
 
-    sim = SIM_ENV
+    sim = SIM
 
     prefilled_buffer = get_buffer(
         model=test_model,
@@ -71,7 +72,7 @@ def test_max_bound_models(model, state_dim):
         use_max_bound=True,
     )  # instantiate a model
 
-    sim = SIM_ENV
+    sim = SIM
 
     prefilled_buffer = get_buffer(
         model=test_model,
diff --git a/tests/test_sim.py b/tests/test_sim.py
index adb9e01..5cf3edb 100644
--- a/tests/test_sim.py
+++ b/tests/test_sim.py
@@ -2,17 +2,17 @@
 
 import pytest
 
-from robot_nav.sim import SIM_ENV
+from robot_nav.SIM_ENV.sim import SIM
 import numpy as np
 
 skip_on_ci = pytest.mark.skipif(
-    os.getenv("CI") == "true",
-    reason="Skipped on CI (GitHub Actions)"
+    os.getenv("CI") == "true", reason="Skipped on CI (GitHub Actions)"
 )
 
+
 @skip_on_ci
 def test_sim():
-    sim = SIM_ENV("/tests/test_world.yaml")
+    sim = SIM("/tests/test_world.yaml")
     robot_state = sim.env.get_robot_state()
     state = sim.step(1, 0)
     next_robot_state = sim.env.get_robot_state()
@@ -27,9 +27,10 @@
     assert np.not_equal(robot_state[0], new_robot_state[0])
     assert np.not_equal(robot_state[1], new_robot_state[1])
 
+
 @skip_on_ci
 def test_sincos():
-    sim = SIM_ENV("/tests/test_world.yaml")
+    sim = SIM("/tests/test_world.yaml")
     cos, sin = sim.cossin([1, 0], [0, 1])
     assert np.isclose(cos, 0)
     assert np.isclose(sin, 1)
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 7aa18ef..2414152 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -4,10 +4,11 @@ from robot_nav.models.PPO.PPO import PPO, RolloutBuffer
 from robot_nav.models.RCPG.RCPG import RCPG
 from robot_nav.utils import get_buffer, RolloutReplayBuffer, ReplayBuffer
-from robot_nav.sim import SIM_ENV
+from robot_nav.SIM_ENV.sim import SIM
 
 PROJECT_ROOT = Path(__file__).resolve().parents[1]
 
+
 def test_buffer():
     model = SAC(
         state_dim=10,
@@ -18,7 +19,7 @@ def test_buffer():
         load_model=False,
     )  # instantiate a model
 
-    sim = SIM_ENV
+    sim = SIM
     buffer = get_buffer(
         model=model,
         sim=sim,
@@ -57,7 +58,7 @@ def test_rollout_buffer():
         load_model=False,
     )  # instantiate a model
 
-    sim = SIM_ENV
+    sim = SIM
     buffer = get_buffer(
         model=model,
         sim=sim,
@@ -95,7 +96,7 @@ def test_ppo_buffer():
         save_every=0,
         load_model=False,
     )
-    sim = SIM_ENV
+    sim = SIM
     buffer = get_buffer(
         model=model,
         sim=sim,

From 564294f48796e83f4f1ec17d8c2bfbf84b8b1048 Mon Sep 17 00:00:00 2001
From: reinis
Date: Wed, 2 Jul 2025 11:45:49 +0200
Subject: [PATCH 10/12] second refactor

---
 robot_nav/SIM_ENV/marl_sim.py        | 82 +++++++++++++------------
 robot_nav/models/CNNTD3/CNNTD3.py    |  4 +-
 robot_nav/models/DDPG/DDPG.py        |  4 +-
 robot_nav/models/MARL/hsAttention.py | 35 +++++------
 robot_nav/models/MARL/marlTD3.py     | 28 ++++------
 robot_nav/models/SAC/SAC.py          |  4 +-
 robot_nav/models/TD3/TD3.py          |  4 +-
 robot_nav/multi_robot_world.yaml     |  9 +--
 8 files changed, 80 insertions(+), 90 deletions(-)

diff --git a/robot_nav/SIM_ENV/marl_sim.py b/robot_nav/SIM_ENV/marl_sim.py
index f8b1961..8fd10ba 100644
--- a/robot_nav/SIM_ENV/marl_sim.py
+++ b/robot_nav/SIM_ENV/marl_sim.py
@@ -8,10 +8,10 @@
 class MARL_SIM(SIM_ENV):
     """
-    A simulation environment interface for robot navigation using IRSim.
+    A simulation environment interface for robot navigation using IRSim in a MARL setting.
 
     This class wraps around the IRSim environment and provides methods for stepping,
-    resetting, and interacting with a mobile robot, including reward computation.
+    resetting, and interacting with mobile robots, including reward computation.
 
     Attributes:
         env (object): The simulation environment instance from IRSim.
@@ -33,6 +33,8 @@ def __init__(self, world_file="multi_robot_world.yaml", disable_plotting=False):
         robot_info = self.env.get_robot_info(0)
         self.robot_goal = robot_info.goal
         self.num_robots = len(self.env.robot_list)
+        self.x_range = self.env._world.x_range
+        self.y_range = self.env._world.y_range
 
     def step(self, action, connection, combined_weights=None):
         """
@@ -46,7 +48,6 @@ def step(self, action, connection, combined_weights=None):
             (tuple): Contains the latest LIDAR scan, distance to goal, cosine and sine of
                 angle to goal, collision flag, goal reached flag, applied action,
                 and computed reward.
""" - # action = [[lin_velocity, ang_velocity], [lin_velocity, ang_velocity], [lin_velocity, ang_velocity], [lin_velocity, ang_velocity], [lin_velocity, ang_velocity]] self.env.step(action_id=[i for i in range(self.num_robots)], action=action) self.env.render() @@ -139,8 +140,8 @@ def step(self, action, connection, combined_weights=None): obstacle_list=self.env.obstacle_list, init=True, range_limits=[ - [1, 1, -3.141592653589793], - [11, 11, 3.141592653589793], + [self.x_range[0] + 1, self.y_range[0] + 1, -3.141592653589793], + [self.x_range[1] - 1, self.y_range[1] - 1, 3.141592653589793], ], ) @@ -209,8 +210,8 @@ def reset( if random_obstacle_ids is None: random_obstacle_ids = [i + self.num_robots for i in range(7)] self.env.random_obstacle_position( - range_low=[0, 0, -3.14], - range_high=[12, 12, 3.14], + range_low=[self.x_range[0], self.y_range[0], -3.14], + range_high=[self.x_range[1], self.y_range[1], 3.14], ids=random_obstacle_ids, non_overlapping=True, ) @@ -221,8 +222,8 @@ def reset( obstacle_list=self.env.obstacle_list, init=True, range_limits=[ - [1, 1, -3.141592653589793], - [11, 11, 3.141592653589793], + [self.x_range[0] + 1, self.y_range[0] + 1, -3.141592653589793], + [self.x_range[1] - 1, self.y_range[1] - 1, 3.141592653589793], ], ) else: @@ -251,7 +252,7 @@ def reset( ) @staticmethod - def get_reward(goal, collision, action, closest_robots, distance): + def get_reward(goal, collision, action, closest_robots, distance, phase=1): """ Calculate the reward for the current step. @@ -259,36 +260,41 @@ def get_reward(goal, collision, action, closest_robots, distance): goal (bool): Whether the goal has been reached. collision (bool): Whether a collision occurred. action (list): The action taken [linear velocity, angular velocity]. - laser_scan (list): The LIDAR scan readings. + closest_robots (list): Distances to the closest robots. + distance (float): Distance to goal. + phase (int, optional): Reward function phase. Defaults to 1. Returns: (float): Computed reward for the current state. 
""" - # phase1 - if goal: - return 100.0 - elif collision: - return -100.0 * 3 * action[0] - else: - r_dist = 1.5 / distance - cl_pen = 0 - for rob in closest_robots: - add = 1.5 - rob if rob < 1.5 else 0 - cl_pen += add - - return action[0] - 0.5 * abs(action[1]) - cl_pen + r_dist - - # phase2 - # if goal: - # return 70.0 - # elif collision: - # return -100.0 * 3 * action[0] - # else: - # r_dist = 1.5 / distance - # cl_pen = 0 - # for rob in closest_robots: - # add = (3 - rob)**2 if rob < 3 else 0 - # cl_pen += add - # - # return -0.5 * abs(action[1]) - cl_pen + match phase: + case 1: + if goal: + return 100.0 + elif collision: + return -100.0 * 3 * action[0] + else: + r_dist = 1.5 / distance + cl_pen = 0 + for rob in closest_robots: + add = 1.5 - rob if rob < 1.5 else 0 + cl_pen += add + + return action[0] - 0.5 * abs(action[1]) - cl_pen + r_dist + + case 2: + if goal: + return 70.0 + elif collision: + return -100.0 * 3 * action[0] + else: + cl_pen = 0 + for rob in closest_robots: + add = (3 - rob) ** 2 if rob < 3 else 0 + cl_pen += add + + return -0.5 * abs(action[1]) - cl_pen + + case _: + raise ValueError("Unknown reward phase") diff --git a/robot_nav/models/CNNTD3/CNNTD3.py b/robot_nav/models/CNNTD3/CNNTD3.py index 0be20da..af6cf45 100644 --- a/robot_nav/models/CNNTD3/CNNTD3.py +++ b/robot_nav/models/CNNTD3/CNNTD3.py @@ -325,8 +325,8 @@ def train( state = torch.Tensor(batch_states).to(self.device) next_state = torch.Tensor(batch_next_states).to(self.device) action = torch.Tensor(batch_actions).to(self.device) - reward = torch.Tensor(batch_rewards).to(self.device) - done = torch.Tensor(batch_dones).to(self.device) + reward = torch.Tensor(batch_rewards).to(self.device).reshape(-1, 1) + done = torch.Tensor(batch_dones).to(self.device).reshape(-1, 1) # Obtain the estimated action from the next state by using the actor-target next_action = self.actor_target(next_state) diff --git a/robot_nav/models/DDPG/DDPG.py b/robot_nav/models/DDPG/DDPG.py index 0f9b98b..2b3abfe 100644 --- a/robot_nav/models/DDPG/DDPG.py +++ b/robot_nav/models/DDPG/DDPG.py @@ -254,8 +254,8 @@ def train( state = torch.Tensor(batch_states).to(self.device) next_state = torch.Tensor(batch_next_states).to(self.device) action = torch.Tensor(batch_actions).to(self.device) - reward = torch.Tensor(batch_rewards).to(self.device) - done = torch.Tensor(batch_dones).to(self.device) + reward = torch.Tensor(batch_rewards).to(self.device).reshape(-1, 1) + done = torch.Tensor(batch_dones).to(self.device).reshape(-1, 1) # Obtain the estimated action from the next state by using the actor-target next_action = self.actor_target(next_state) diff --git a/robot_nav/models/MARL/hsAttention.py b/robot_nav/models/MARL/hsAttention.py index 927c1df..4978a9f 100644 --- a/robot_nav/models/MARL/hsAttention.py +++ b/robot_nav/models/MARL/hsAttention.py @@ -9,7 +9,6 @@ def __init__(self, embedding_dim): super(Attention, self).__init__() self.embedding_dim = embedding_dim - # CNN for laser scan self.embedding1 = nn.Linear(5, 128) nn.init.kaiming_uniform_(self.embedding1.weight, nonlinearity="leaky_relu") self.embedding2 = nn.Linear(128, embedding_dim) @@ -28,7 +27,7 @@ def __init__(self, embedding_dim): self.k = nn.Linear(10, embedding_dim, bias=False) self.v = nn.Linear(10, embedding_dim) - # Soft attention score network (with distance) + # Soft attention score network (with polar other robot goal position) self.attn_score_layer = nn.Sequential( nn.Linear(embedding_dim * 2, embedding_dim), nn.ReLU(), @@ -58,8 +57,8 @@ def forward(self, embedding): ) # 
assume (cos(θ), sin(θ)) action = embedding[:, :, 7:9].reshape(batch_size, n_agents, 2) goal = embedding[:, :, -2:].reshape(batch_size, n_agents, 2) - goal_j = goal.unsqueeze(1).expand(-1, n_agents, -1, -1) # (B, N, N, 2) - pos_i = position.unsqueeze(2) # (B, N, 1, 2) + goal_j = goal.unsqueeze(1).expand(-1, n_agents, -1, -1) + pos_i = position.unsqueeze(2) goal_rel_vec = goal_j - pos_i agent_embed = self.encode_agent_features(embed) @@ -100,10 +99,10 @@ def forward(self, embedding): action.unsqueeze(1).expand(-1, n_agents, -1, -1), # (B, N, N, 2) ], dim=-1, - ) # (B, N, N, 7) + ) # Broadcast h_i along N (for each pair) - h_i_expanded = h_i.expand(-1, -1, n_agents, -1) # (B, N, N, D) + h_i_expanded = h_i.expand(-1, -1, n_agents, -1) # Remove self-pairs using mask mask = ~torch.eye(n_agents, dtype=torch.bool, device=embedding.device) @@ -115,7 +114,7 @@ def forward(self, embedding): ) # Concatenate agent embedding and edge features - hard_input = torch.cat([h_i_flat, edge_flat], dim=-1) # (B*N, N-1, D+7) + hard_input = torch.cat([h_i_flat, edge_flat], dim=-1) # Hard attention forward h_hard = self.hard_mlp(hard_input) @@ -125,8 +124,7 @@ def forward(self, embedding): ].unsqueeze(2) hard_weights = hard_weights.view(batch_size, n_agents, n_agents - 1) - unnorm_rel_vec = rel_vec - unnorm_rel_dist = torch.linalg.vector_norm(unnorm_rel_vec, dim=-1, keepdim=True) + unnorm_rel_dist = torch.linalg.vector_norm(rel_vec, dim=-1, keepdim=True) unnorm_rel_dist = unnorm_rel_dist[:, mask].reshape( batch_size * n_agents, n_agents - 1, 1 ) @@ -151,23 +149,21 @@ def forward(self, embedding): soft_edge_features = torch.cat([edge_features, goal_polar], dim=-1) for i in range(n_agents): - q_i = q[:, i : i + 1, :] # (B, 1, D) + q_i = q[:, i : i + 1, :] mask = torch.ones(n_agents, dtype=torch.bool, device=edge_features.device) mask[i] = False edge_i_wo_self = soft_edge_features[:, i, mask, :] - edge_i_wo_self = edge_i_wo_self.squeeze(1) # (B, N-1, 7) + edge_i_wo_self = edge_i_wo_self.squeeze(1) k = F.leaky_relu(self.k(edge_i_wo_self)) - q_i_expanded = q_i.expand(-1, n_agents - 1, -1) # (B, N-1, D) - attention_input = torch.cat([q_i_expanded, k], dim=-1) # (B, N-1, D+7) + q_i_expanded = q_i.expand(-1, n_agents - 1, -1) + attention_input = torch.cat([q_i_expanded, k], dim=-1) # Score computation - scores = self.attn_score_layer(attention_input).transpose( - 1, 2 - ) # (B, 1, N-1) + scores = self.attn_score_layer(attention_input).transpose(1, 2) # Mask using hard weights - h_weights = hard_weights[:, i].unsqueeze(1) # (B, 1, N-1) + h_weights = hard_weights[:, i].unsqueeze(1) mask = (h_weights > 0.5).float() # All-zero mask handling @@ -200,11 +196,8 @@ def forward(self, embedding): ) entropy_list.append(entropy) - # Project each other agent's features to embedding dim *before* the attention-weighted sum v_j = F.leaky_relu(self.v(edge_i_wo_self)) - attn_output = torch.bmm(combined_weights, v_j).squeeze( - 1 - ) # (B, embedding_dim) + attn_output = torch.bmm(combined_weights, v_j).squeeze(1) attention_outputs.append(attn_output) comb_w = torch.stack(combined_w, dim=1).reshape(n_agents, -1) diff --git a/robot_nav/models/MARL/marlTD3.py b/robot_nav/models/MARL/marlTD3.py index a67eb3d..39e4b44 100644 --- a/robot_nav/models/MARL/marlTD3.py +++ b/robot_nav/models/MARL/marlTD3.py @@ -215,6 +215,9 @@ def train( policy_noise=0.2, noise_clip=0.5, policy_freq=2, + bce_weight=0.1, + entropy_weight=1, + connection_proximity_threshold=4, ): av_Q = 0 max_Q = -inf @@ -298,19 +301,16 @@ def train( current_Q2, target_Q ) - 
proximity_threshold = 4 # You may need to adjust this - targets = (unnorm_rel_dist.flatten() < proximity_threshold).float() + targets = ( + unnorm_rel_dist.flatten() < connection_proximity_threshold + ).float() flat_logits = hard_logits.flatten() bce_loss = F.binary_cross_entropy_with_logits(flat_logits, targets) - bce_weight = 0.1 av_critic_bce_loss.append(bce_loss) - critic_entropy_weight = 1 # or tuneable total_loss = ( - critic_loss - - critic_entropy_weight * mean_entropy - + bce_weight * bce_loss + critic_loss - entropy_weight * mean_entropy + bce_weight * bce_loss ) av_critic_entropy.append(mean_entropy) @@ -328,20 +328,18 @@ def train( action, hard_logits, unnorm_rel_dist, mean_entropy, hard_weights, _ = ( self.actor(state, detach_attn=False) ) - targets = (unnorm_rel_dist.flatten() < proximity_threshold).float() + targets = ( + unnorm_rel_dist.flatten() < connection_proximity_threshold + ).float() flat_logits = hard_logits.flatten() bce_loss = F.binary_cross_entropy_with_logits(flat_logits, targets) - bce_weight = 0.1 av_actor_bce_loss.append(bce_loss) actor_Q, _, _, _, _, _ = self.critic(state, action) actor_loss = -actor_Q.mean() - actor_entropy_weight = 0.05 total_loss = ( - actor_loss - - actor_entropy_weight * mean_entropy - + bce_weight * bce_loss + actor_loss - entropy_weight * mean_entropy + bce_weight * bce_loss ) av_actor_entropy.append(mean_entropy) @@ -458,9 +456,7 @@ def prepare_state( poses (list): Each agent's global pose [x, y, theta]. distance, cos, sin: Unused, can be removed or ignored. collision (list): Collision flags per agent. - goal (list): Goal reached flags per agent. action (list): Last action taken [lin_vel, ang_vel]. - positions (list): Extra features (e.g., neighbors). goal_positions (list): Each agent's goal [x, y]. 
        Returns:
@@ -483,7 +479,7 @@
             heading_sin = np.sin(theta)
 
             # Last velocity
-            lin_vel = act[0] * 2  # Assuming original range [-0.5, 0.5]
+            lin_vel = act[0] * 2  # Assuming original range [0, 0.5]
             ang_vel = (act[1] + 1) / 2  # Assuming original range [-1, 1]
 
             # Final state vector
diff --git a/robot_nav/models/SAC/SAC.py b/robot_nav/models/SAC/SAC.py
index 10639ae..157e1d3 100644
--- a/robot_nav/models/SAC/SAC.py
+++ b/robot_nav/models/SAC/SAC.py
@@ -346,8 +346,8 @@ def update(self, replay_buffer, step, batch_size):
         state = torch.Tensor(batch_states).to(self.device)
         next_state = torch.Tensor(batch_next_states).to(self.device)
         action = torch.Tensor(batch_actions).to(self.device)
-        reward = torch.Tensor(batch_rewards).to(self.device)
-        done = torch.Tensor(batch_dones).to(self.device)
+        reward = torch.Tensor(batch_rewards).to(self.device).reshape(-1, 1)
+        done = torch.Tensor(batch_dones).to(self.device).reshape(-1, 1)
         self.train_metrics_dict["train/batch_reward_av"].append(
             batch_rewards.mean().item()
         )
diff --git a/robot_nav/models/TD3/TD3.py b/robot_nav/models/TD3/TD3.py
index 5bdbf16..8c66666 100644
--- a/robot_nav/models/TD3/TD3.py
+++ b/robot_nav/models/TD3/TD3.py
@@ -268,8 +268,8 @@ def train(
         state = torch.Tensor(batch_states).to(self.device)
         next_state = torch.Tensor(batch_next_states).to(self.device)
         action = torch.Tensor(batch_actions).to(self.device)
-        reward = torch.Tensor(batch_rewards).to(self.device)
-        done = torch.Tensor(batch_dones).to(self.device)
+        reward = torch.Tensor(batch_rewards).to(self.device).reshape(-1, 1)
+        done = torch.Tensor(batch_dones).to(self.device).reshape(-1, 1)
 
         # Obtain the estimated action from the next state by using the actor-target
         next_action = self.actor_target(next_state)
diff --git a/robot_nav/multi_robot_world.yaml b/robot_nav/multi_robot_world.yaml
index 2a822f8..66e31ec 100644
--- a/robot_nav/multi_robot_world.yaml
+++ b/robot_nav/multi_robot_world.yaml
@@ -1,8 +1,8 @@
 world:
   height: 12   # the height of the world
   width: 12   # the height of the world
-  step_time: 0.3  # 10Hz calculate each step
-  sample_time: 0.3  # 10 Hz for render and data extraction
+  step_time: 0.3  # Calculate each step
+  sample_time: 0.3  # For render and data extraction
   collision_mode: 'reactive'
 
 robot:
@@ -20,8 +20,3 @@ robot:
 
   plot:
     show_trajectory: False
-
-#obstacle:
-#  - shape: { name: 'linestring', vertices: [ [ 0, 0 ], [ 12, 0 ], [ 12, 12 ], [ 0, 12 ],[ 0, 0 ] ] } # vertices
-#    kinematics: {name: 'static'}
-#    state: [ 0, 0, 0 ]
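The phased reward introduced above is easiest to read through a worked example. The following sketch (illustrative only, not part of any patch; the input values are made up) calls the static get_reward from the diff for a robot that has neither reached its goal nor collided:

from robot_nav.SIM_ENV.marl_sim import MARL_SIM

action = [0.4, 0.1]          # [linear velocity, angular velocity]
closest_robots = [1.2, 2.6]  # distances to the two nearest robots
distance = 3.0               # distance to the goal

# Phase 1 rewards forward motion and goal proximity, with a linear penalty
# for any neighbour closer than 1.5:
# 0.4 - 0.5 * 0.1 - (1.5 - 1.2) + 1.5 / 3.0 = 0.55
r1 = MARL_SIM.get_reward(False, False, action, closest_robots, distance, phase=1)

# Phase 2 drops the progress terms and penalizes proximity quadratically
# inside a radius of 3:
# -0.5 * 0.1 - ((3 - 1.2) ** 2 + (3 - 2.6) ** 2) = -3.45
r2 = MARL_SIM.get_reward(False, False, action, closest_robots, distance, phase=2)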
From de2af640ae6b2125af0d1ca52fc0b01fedae5359 Mon Sep 17 00:00:00 2001
From: reinis
Date: Wed, 2 Jul 2025 14:47:06 +0200
Subject: [PATCH 11/12] add tests

---
 tests/test_marl_world.yaml | 22 +++++++++++++
 tests/test_model.py        | 57 ++++++++++++++++++++++++++++++++++++++
 tests/test_sim.py          | 26 ++++++++++++++++-
 3 files changed, 104 insertions(+), 1 deletion(-)
 create mode 100644 tests/test_marl_world.yaml

diff --git a/tests/test_marl_world.yaml b/tests/test_marl_world.yaml
new file mode 100644
index 0000000..6af7f68
--- /dev/null
+++ b/tests/test_marl_world.yaml
@@ -0,0 +1,22 @@
+world:
+  height: 12   # the height of the world
+  width: 12   # the width of the world
+  step_time: 1.0
+  sample_time: 1.0
+  collision_mode: 'reactive'
+
+robot:
+  - number: 3
+    kinematics: {name: 'diff'}
+    distribution: {name: 'manual'}
+    shape: {name: 'circle', radius: 0.2}
+    vel_min: [ 0, -1.0 ]
+    vel_max: [ 1.0, 1.0 ]
+    state: [[3, 10, 0], [3, 6, 0], [3, 2, 0]]
+    goal: [[9, 9, 0], [8, 8, 0], [7, 7, 0]]
+    color: ['royalblue', 'red', 'green', 'orange', 'purple',
+            'yellow', 'cyan', 'magenta', 'lime', 'pink', 'brown']
+    arrive_mode: position
+    goal_threshold: 0.3
+
+  plot:
+    show_trajectory: False
diff --git a/tests/test_model.py b/tests/test_model.py
index 7b3c354..6ad13d7 100644
--- a/tests/test_model.py
+++ b/tests/test_model.py
@@ -1,5 +1,8 @@
 from pathlib import Path
 
+import torch
+
+from robot_nav.SIM_ENV.marl_sim import MARL_SIM
 from robot_nav.models.RCPG.RCPG import RCPG
 from robot_nav.models.TD3.TD3 import TD3
 from robot_nav.models.CNNTD3.CNNTD3 import CNNTD3
@@ -7,6 +10,7 @@
 from robot_nav.models.DDPG.DDPG import DDPG
 from robot_nav.utils import get_buffer
 from robot_nav.SIM_ENV.sim import SIM
+from robot_nav.models.MARL.marlTD3 import TD3 as marlTD3
 import pytest
 
 PROJECT_ROOT = Path(__file__).resolve().parents[1]
@@ -91,3 +95,56 @@ def test_max_bound_models(model, state_dim):
         iterations=2,
         batch_size=8,
     )
+
+
+def test_marl_models():
+    sim = MARL_SIM("/tests/test_marl_world.yaml", disable_plotting=True)
+    test_model = marlTD3(
+        state_dim=11,
+        action_dim=2,
+        max_action=1,
+        num_robots=sim.num_robots,
+        device="cpu",
+        save_every=0,
+        load_model=False,
+    )  # instantiate a model
+
+    replay_buffer = get_buffer(
+        model=test_model,
+        sim=sim,
+        load_saved_buffer=False,
+        pretrain=False,
+        pretraining_iterations=0,
+        training_iterations=0,
+        batch_size=0,
+        buffer_size=100,
+    )
+
+    for _ in range(10):
+        connections = torch.tensor(
+            [[0.0 for _ in range(sim.num_robots - 1)] for _ in range(3)]
+        )
+        (
+            poses,
+            distance,
+            cos,
+            sin,
+            collision,
+            goal,
+            a,
+            reward,
+            positions,
+            goal_positions,
+        ) = sim.step([[0, 0] for _ in range(sim.num_robots)], connections)
+        state, terminal = test_model.prepare_state(
+            poses, distance, cos, sin, collision, a, goal_positions
+        )
+        replay_buffer.add(
+            state, [[0, 0] for _ in range(sim.num_robots)], reward, terminal, state
+        )
+
+    test_model.train(
+        replay_buffer=replay_buffer,
+        iterations=2,
+        batch_size=8,
+    )
diff --git a/tests/test_sim.py b/tests/test_sim.py
index 5cf3edb..cf2e616 100644
--- a/tests/test_sim.py
+++ b/tests/test_sim.py
@@ -1,7 +1,9 @@
 import os
 
 import pytest
+import torch
 
+from robot_nav.SIM_ENV.marl_sim import MARL_SIM
 from robot_nav.SIM_ENV.sim import SIM
 import numpy as np
 
@@ -12,7 +14,7 @@
 
 @skip_on_ci
 def test_sim():
-    sim = SIM("/tests/test_world.yaml")
+    sim = SIM("/tests/test_world.yaml", disable_plotting=True)
     robot_state = sim.env.get_robot_state()
     state = sim.step(1, 0)
     next_robot_state = sim.env.get_robot_state()
@@ -28,6 +30,28 @@
     assert np.not_equal(robot_state[1], new_robot_state[1])
 
 
+def test_marl_sim():
+    sim = MARL_SIM("/tests/test_marl_world.yaml", disable_plotting=True)
+    robot_state = [sim.env.robot_list[i].state[:2] for i in range(3)]
+    connections = torch.tensor(
+        [[0.0 for _ in range(sim.num_robots - 1)] for _ in range(3)]
+    )
+
+    _ = sim.step([[1, 0], [1, 0], [1, 0]], connections)
+    next_robot_state = [sim.env.robot_list[i].state[:2] for i in range(3)]
+    for j in range(3):
+        assert np.isclose(robot_state[j][0], next_robot_state[j][0] - 1)
+        assert np.isclose(robot_state[j][1], next_robot_state[j][1])
+
+    assert len(sim.env.obstacle_list) == 0
+
+    sim.reset()
+    new_robot_state = [sim.env.robot_list[i].state[:2] for i in range(3)]
+    for j in range(3):
+        assert np.not_equal(robot_state[j][0], new_robot_state[j][0])
+        assert np.not_equal(robot_state[j][1], new_robot_state[j][1])
+
+
 @skip_on_ci
 def test_sincos():
     sim = SIM("/tests/test_world.yaml")

From b98444ec7db149bc7988e3253032e6018e4c1c7c Mon Sep 17 00:00:00 2001
From: reinis
Date: Wed, 2
Jul 2025 15:01:41 +0200 Subject: [PATCH 12/12] update poetry --- poetry.lock | 320 +++++++++--------- .../{hsAttention.py => hardsoftAttention.py} | 0 robot_nav/models/MARL/marlTD3.py | 2 +- 3 files changed, 164 insertions(+), 158 deletions(-) rename robot_nav/models/MARL/{hsAttention.py => hardsoftAttention.py} (100%) diff --git a/poetry.lock b/poetry.lock index cd87a2d..a5f46f5 100644 --- a/poetry.lock +++ b/poetry.lock @@ -29,18 +29,19 @@ dev = ["backports.zoneinfo", "freezegun (>=1.0,<2.0)", "jinja2 (>=3.0)", "pytest [[package]] name = "backrefs" -version = "5.8" +version = "5.9" description = "A wrapper around re and regex that adds additional back references." optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "backrefs-5.8-py310-none-any.whl", hash = "sha256:c67f6638a34a5b8730812f5101376f9d41dc38c43f1fdc35cb54700f6ed4465d"}, - {file = "backrefs-5.8-py311-none-any.whl", hash = "sha256:2e1c15e4af0e12e45c8701bd5da0902d326b2e200cafcd25e49d9f06d44bb61b"}, - {file = "backrefs-5.8-py312-none-any.whl", hash = "sha256:bbef7169a33811080d67cdf1538c8289f76f0942ff971222a16034da88a73486"}, - {file = "backrefs-5.8-py313-none-any.whl", hash = "sha256:e3a63b073867dbefd0536425f43db618578528e3896fb77be7141328642a1585"}, - {file = "backrefs-5.8-py39-none-any.whl", hash = "sha256:a66851e4533fb5b371aa0628e1fee1af05135616b86140c9d787a2ffdf4b8fdc"}, - {file = "backrefs-5.8.tar.gz", hash = "sha256:2cab642a205ce966af3dd4b38ee36009b31fa9502a35fd61d59ccc116e40a6bd"}, + {file = "backrefs-5.9-py310-none-any.whl", hash = "sha256:db8e8ba0e9de81fcd635f440deab5ae5f2591b54ac1ebe0550a2ca063488cd9f"}, + {file = "backrefs-5.9-py311-none-any.whl", hash = "sha256:6907635edebbe9b2dc3de3a2befff44d74f30a4562adbb8b36f21252ea19c5cf"}, + {file = "backrefs-5.9-py312-none-any.whl", hash = "sha256:7fdf9771f63e6028d7fee7e0c497c81abda597ea45d6b8f89e8ad76994f5befa"}, + {file = "backrefs-5.9-py313-none-any.whl", hash = "sha256:cc37b19fa219e93ff825ed1fed8879e47b4d89aa7a1884860e2db64ccd7c676b"}, + {file = "backrefs-5.9-py314-none-any.whl", hash = "sha256:df5e169836cc8acb5e440ebae9aad4bf9d15e226d3bad049cf3f6a5c20cc8dc9"}, + {file = "backrefs-5.9-py39-none-any.whl", hash = "sha256:f48ee18f6252b8f5777a22a00a09a85de0ca931658f1dd96d4406a34f3748c60"}, + {file = "backrefs-5.9.tar.gz", hash = "sha256:808548cb708d66b82ee231f962cb36faaf4f2baab032f2fbb783e9c2fdddaa59"}, ] [package.extras] @@ -552,67 +553,67 @@ colorama = ">=0.4" [[package]] name = "grpcio" -version = "1.73.0" +version = "1.73.1" description = "HTTP/2-based RPC framework" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "grpcio-1.73.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:d050197eeed50f858ef6c51ab09514856f957dba7b1f7812698260fc9cc417f6"}, - {file = "grpcio-1.73.0-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:ebb8d5f4b0200916fb292a964a4d41210de92aba9007e33d8551d85800ea16cb"}, - {file = "grpcio-1.73.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:c0811331b469e3f15dda5f90ab71bcd9681189a83944fd6dc908e2c9249041ef"}, - {file = "grpcio-1.73.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:12787c791c3993d0ea1cc8bf90393647e9a586066b3b322949365d2772ba965b"}, - {file = "grpcio-1.73.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c17771e884fddf152f2a0df12478e8d02853e5b602a10a9a9f1f52fa02b1d32"}, - {file = "grpcio-1.73.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:275e23d4c428c26b51857bbd95fcb8e528783597207ec592571e4372b300a29f"}, - {file = 
"grpcio-1.73.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:9ffc972b530bf73ef0f948f799482a1bf12d9b6f33406a8e6387c0ca2098a833"}, - {file = "grpcio-1.73.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ebd8d269df64aff092b2cec5e015d8ae09c7e90888b5c35c24fdca719a2c9f35"}, - {file = "grpcio-1.73.0-cp310-cp310-win32.whl", hash = "sha256:072d8154b8f74300ed362c01d54af8b93200c1a9077aeaea79828d48598514f1"}, - {file = "grpcio-1.73.0-cp310-cp310-win_amd64.whl", hash = "sha256:ce953d9d2100e1078a76a9dc2b7338d5415924dc59c69a15bf6e734db8a0f1ca"}, - {file = "grpcio-1.73.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:51036f641f171eebe5fa7aaca5abbd6150f0c338dab3a58f9111354240fe36ec"}, - {file = "grpcio-1.73.0-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:d12bbb88381ea00bdd92c55aff3da3391fd85bc902c41275c8447b86f036ce0f"}, - {file = "grpcio-1.73.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:483c507c2328ed0e01bc1adb13d1eada05cc737ec301d8e5a8f4a90f387f1790"}, - {file = "grpcio-1.73.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c201a34aa960c962d0ce23fe5f423f97e9d4b518ad605eae6d0a82171809caaa"}, - {file = "grpcio-1.73.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:859f70c8e435e8e1fa060e04297c6818ffc81ca9ebd4940e180490958229a45a"}, - {file = "grpcio-1.73.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e2459a27c6886e7e687e4e407778425f3c6a971fa17a16420227bda39574d64b"}, - {file = "grpcio-1.73.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:e0084d4559ee3dbdcce9395e1bc90fdd0262529b32c417a39ecbc18da8074ac7"}, - {file = "grpcio-1.73.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ef5fff73d5f724755693a464d444ee0a448c6cdfd3c1616a9223f736c622617d"}, - {file = "grpcio-1.73.0-cp311-cp311-win32.whl", hash = "sha256:965a16b71a8eeef91fc4df1dc40dc39c344887249174053814f8a8e18449c4c3"}, - {file = "grpcio-1.73.0-cp311-cp311-win_amd64.whl", hash = "sha256:b71a7b4483d1f753bbc11089ff0f6fa63b49c97a9cc20552cded3fcad466d23b"}, - {file = "grpcio-1.73.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:fb9d7c27089d9ba3746f18d2109eb530ef2a37452d2ff50f5a6696cd39167d3b"}, - {file = "grpcio-1.73.0-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:128ba2ebdac41e41554d492b82c34586a90ebd0766f8ebd72160c0e3a57b9155"}, - {file = "grpcio-1.73.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:068ecc415f79408d57a7f146f54cdf9f0acb4b301a52a9e563973dc981e82f3d"}, - {file = "grpcio-1.73.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ddc1cfb2240f84d35d559ade18f69dcd4257dbaa5ba0de1a565d903aaab2968"}, - {file = "grpcio-1.73.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e53007f70d9783f53b41b4cf38ed39a8e348011437e4c287eee7dd1d39d54b2f"}, - {file = "grpcio-1.73.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:4dd8d8d092efede7d6f48d695ba2592046acd04ccf421436dd7ed52677a9ad29"}, - {file = "grpcio-1.73.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:70176093d0a95b44d24baa9c034bb67bfe2b6b5f7ebc2836f4093c97010e17fd"}, - {file = "grpcio-1.73.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:085ebe876373ca095e24ced95c8f440495ed0b574c491f7f4f714ff794bbcd10"}, - {file = "grpcio-1.73.0-cp312-cp312-win32.whl", hash = "sha256:cfc556c1d6aef02c727ec7d0016827a73bfe67193e47c546f7cadd3ee6bf1a60"}, - {file = "grpcio-1.73.0-cp312-cp312-win_amd64.whl", hash = "sha256:bbf45d59d090bf69f1e4e1594832aaf40aa84b31659af3c5e2c3f6a35202791a"}, - {file = 
"grpcio-1.73.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:da1d677018ef423202aca6d73a8d3b2cb245699eb7f50eb5f74cae15a8e1f724"}, - {file = "grpcio-1.73.0-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:36bf93f6a657f37c131d9dd2c391b867abf1426a86727c3575393e9e11dadb0d"}, - {file = "grpcio-1.73.0-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:d84000367508ade791d90c2bafbd905574b5ced8056397027a77a215d601ba15"}, - {file = "grpcio-1.73.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c98ba1d928a178ce33f3425ff823318040a2b7ef875d30a0073565e5ceb058d9"}, - {file = "grpcio-1.73.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a73c72922dfd30b396a5f25bb3a4590195ee45ecde7ee068acb0892d2900cf07"}, - {file = "grpcio-1.73.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:10e8edc035724aba0346a432060fd192b42bd03675d083c01553cab071a28da5"}, - {file = "grpcio-1.73.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:f5cdc332b503c33b1643b12ea933582c7b081957c8bc2ea4cc4bc58054a09288"}, - {file = "grpcio-1.73.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:07ad7c57233c2109e4ac999cb9c2710c3b8e3f491a73b058b0ce431f31ed8145"}, - {file = "grpcio-1.73.0-cp313-cp313-win32.whl", hash = "sha256:0eb5df4f41ea10bda99a802b2a292d85be28958ede2a50f2beb8c7fc9a738419"}, - {file = "grpcio-1.73.0-cp313-cp313-win_amd64.whl", hash = "sha256:38cf518cc54cd0c47c9539cefa8888549fcc067db0b0c66a46535ca8032020c4"}, - {file = "grpcio-1.73.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:1284850607901cfe1475852d808e5a102133461ec9380bc3fc9ebc0686ee8e32"}, - {file = "grpcio-1.73.0-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:0e092a4b28eefb63eec00d09ef33291cd4c3a0875cde29aec4d11d74434d222c"}, - {file = "grpcio-1.73.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:33577fe7febffe8ebad458744cfee8914e0c10b09f0ff073a6b149a84df8ab8f"}, - {file = "grpcio-1.73.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:60813d8a16420d01fa0da1fc7ebfaaa49a7e5051b0337cd48f4f950eb249a08e"}, - {file = "grpcio-1.73.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a9c957dc65e5d474378d7bcc557e9184576605d4b4539e8ead6e351d7ccce20"}, - {file = "grpcio-1.73.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:3902b71407d021163ea93c70c8531551f71ae742db15b66826cf8825707d2908"}, - {file = "grpcio-1.73.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:1dd7fa7276dcf061e2d5f9316604499eea06b1b23e34a9380572d74fe59915a8"}, - {file = "grpcio-1.73.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2d1510c4ea473110cb46a010555f2c1a279d1c256edb276e17fa571ba1e8927c"}, - {file = "grpcio-1.73.0-cp39-cp39-win32.whl", hash = "sha256:d0a1517b2005ba1235a1190b98509264bf72e231215dfeef8db9a5a92868789e"}, - {file = "grpcio-1.73.0-cp39-cp39-win_amd64.whl", hash = "sha256:6228f7eb6d9f785f38b589d49957fca5df3d5b5349e77d2d89b14e390165344c"}, - {file = "grpcio-1.73.0.tar.gz", hash = "sha256:3af4c30918a7f0d39de500d11255f8d9da4f30e94a2033e70fe2a720e184bd8e"}, + {file = "grpcio-1.73.1-cp310-cp310-linux_armv7l.whl", hash = "sha256:2d70f4ddd0a823436c2624640570ed6097e40935c9194482475fe8e3d9754d55"}, + {file = "grpcio-1.73.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:3841a8a5a66830261ab6a3c2a3dc539ed84e4ab019165f77b3eeb9f0ba621f26"}, + {file = "grpcio-1.73.1-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:628c30f8e77e0258ab788750ec92059fc3d6628590fb4b7cea8c102503623ed7"}, + {file = 
"grpcio-1.73.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:67a0468256c9db6d5ecb1fde4bf409d016f42cef649323f0a08a72f352d1358b"}, + {file = "grpcio-1.73.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68b84d65bbdebd5926eb5c53b0b9ec3b3f83408a30e4c20c373c5337b4219ec5"}, + {file = "grpcio-1.73.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:c54796ca22b8349cc594d18b01099e39f2b7ffb586ad83217655781a350ce4da"}, + {file = "grpcio-1.73.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:75fc8e543962ece2f7ecd32ada2d44c0c8570ae73ec92869f9af8b944863116d"}, + {file = "grpcio-1.73.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6a6037891cd2b1dd1406b388660522e1565ed340b1fea2955b0234bdd941a862"}, + {file = "grpcio-1.73.1-cp310-cp310-win32.whl", hash = "sha256:cce7265b9617168c2d08ae570fcc2af4eaf72e84f8c710ca657cc546115263af"}, + {file = "grpcio-1.73.1-cp310-cp310-win_amd64.whl", hash = "sha256:6a2b372e65fad38842050943f42ce8fee00c6f2e8ea4f7754ba7478d26a356ee"}, + {file = "grpcio-1.73.1-cp311-cp311-linux_armv7l.whl", hash = "sha256:ba2cea9f7ae4bc21f42015f0ec98f69ae4179848ad744b210e7685112fa507a1"}, + {file = "grpcio-1.73.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:d74c3f4f37b79e746271aa6cdb3a1d7e4432aea38735542b23adcabaaee0c097"}, + {file = "grpcio-1.73.1-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:5b9b1805a7d61c9e90541cbe8dfe0a593dfc8c5c3a43fe623701b6a01b01d710"}, + {file = "grpcio-1.73.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3215f69a0670a8cfa2ab53236d9e8026bfb7ead5d4baabe7d7dc11d30fda967"}, + {file = "grpcio-1.73.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc5eccfd9577a5dc7d5612b2ba90cca4ad14c6d949216c68585fdec9848befb1"}, + {file = "grpcio-1.73.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:dc7d7fd520614fce2e6455ba89791458020a39716951c7c07694f9dbae28e9c0"}, + {file = "grpcio-1.73.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:105492124828911f85127e4825d1c1234b032cb9d238567876b5515d01151379"}, + {file = "grpcio-1.73.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:610e19b04f452ba6f402ac9aa94eb3d21fbc94553368008af634812c4a85a99e"}, + {file = "grpcio-1.73.1-cp311-cp311-win32.whl", hash = "sha256:d60588ab6ba0ac753761ee0e5b30a29398306401bfbceffe7d68ebb21193f9d4"}, + {file = "grpcio-1.73.1-cp311-cp311-win_amd64.whl", hash = "sha256:6957025a4608bb0a5ff42abd75bfbb2ed99eda29d5992ef31d691ab54b753643"}, + {file = "grpcio-1.73.1-cp312-cp312-linux_armv7l.whl", hash = "sha256:921b25618b084e75d424a9f8e6403bfeb7abef074bb6c3174701e0f2542debcf"}, + {file = "grpcio-1.73.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:277b426a0ed341e8447fbf6c1d6b68c952adddf585ea4685aa563de0f03df887"}, + {file = "grpcio-1.73.1-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:96c112333309493c10e118d92f04594f9055774757f5d101b39f8150f8c25582"}, + {file = "grpcio-1.73.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f48e862aed925ae987eb7084409a80985de75243389dc9d9c271dd711e589918"}, + {file = "grpcio-1.73.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83a6c2cce218e28f5040429835fa34a29319071079e3169f9543c3fbeff166d2"}, + {file = "grpcio-1.73.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:65b0458a10b100d815a8426b1442bd17001fdb77ea13665b2f7dc9e8587fdc6b"}, + {file = "grpcio-1.73.1-cp312-cp312-musllinux_1_1_i686.whl", hash = 
"sha256:0a9f3ea8dce9eae9d7cb36827200133a72b37a63896e0e61a9d5ec7d61a59ab1"}, + {file = "grpcio-1.73.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:de18769aea47f18e782bf6819a37c1c528914bfd5683b8782b9da356506190c8"}, + {file = "grpcio-1.73.1-cp312-cp312-win32.whl", hash = "sha256:24e06a5319e33041e322d32c62b1e728f18ab8c9dbc91729a3d9f9e3ed336642"}, + {file = "grpcio-1.73.1-cp312-cp312-win_amd64.whl", hash = "sha256:303c8135d8ab176f8038c14cc10d698ae1db9c480f2b2823f7a987aa2a4c5646"}, + {file = "grpcio-1.73.1-cp313-cp313-linux_armv7l.whl", hash = "sha256:b310824ab5092cf74750ebd8a8a8981c1810cb2b363210e70d06ef37ad80d4f9"}, + {file = "grpcio-1.73.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:8f5a6df3fba31a3485096ac85b2e34b9666ffb0590df0cd044f58694e6a1f6b5"}, + {file = "grpcio-1.73.1-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:052e28fe9c41357da42250a91926a3e2f74c046575c070b69659467ca5aa976b"}, + {file = "grpcio-1.73.1-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c0bf15f629b1497436596b1cbddddfa3234273490229ca29561209778ebe182"}, + {file = "grpcio-1.73.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ab860d5bfa788c5a021fba264802e2593688cd965d1374d31d2b1a34cacd854"}, + {file = "grpcio-1.73.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:ad1d958c31cc91ab050bd8a91355480b8e0683e21176522bacea225ce51163f2"}, + {file = "grpcio-1.73.1-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:f43ffb3bd415c57224c7427bfb9e6c46a0b6e998754bfa0d00f408e1873dcbb5"}, + {file = "grpcio-1.73.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:686231cdd03a8a8055f798b2b54b19428cdf18fa1549bee92249b43607c42668"}, + {file = "grpcio-1.73.1-cp313-cp313-win32.whl", hash = "sha256:89018866a096e2ce21e05eabed1567479713ebe57b1db7cbb0f1e3b896793ba4"}, + {file = "grpcio-1.73.1-cp313-cp313-win_amd64.whl", hash = "sha256:4a68f8c9966b94dff693670a5cf2b54888a48a5011c5d9ce2295a1a1465ee84f"}, + {file = "grpcio-1.73.1-cp39-cp39-linux_armv7l.whl", hash = "sha256:b4adc97d2d7f5c660a5498bda978ebb866066ad10097265a5da0511323ae9f50"}, + {file = "grpcio-1.73.1-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:c45a28a0cfb6ddcc7dc50a29de44ecac53d115c3388b2782404218db51cb2df3"}, + {file = "grpcio-1.73.1-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:10af9f2ab98a39f5b6c1896c6fc2036744b5b41d12739d48bed4c3e15b6cf900"}, + {file = "grpcio-1.73.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:45cf17dcce5ebdb7b4fe9e86cb338fa99d7d1bb71defc78228e1ddf8d0de8cbb"}, + {file = "grpcio-1.73.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c502c2e950fc7e8bf05c047e8a14522ef7babac59abbfde6dbf46b7a0d9c71e"}, + {file = "grpcio-1.73.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6abfc0f9153dc4924536f40336f88bd4fe7bd7494f028675e2e04291b8c2c62a"}, + {file = "grpcio-1.73.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ed451a0e39c8e51eb1612b78686839efd1a920666d1666c1adfdb4fd51680c0f"}, + {file = "grpcio-1.73.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:07f08705a5505c9b5b0cbcbabafb96462b5a15b7236bbf6bbcc6b0b91e1cbd7e"}, + {file = "grpcio-1.73.1-cp39-cp39-win32.whl", hash = "sha256:ad5c958cc3d98bb9d71714dc69f1c13aaf2f4b53e29d4cc3f1501ef2e4d129b2"}, + {file = "grpcio-1.73.1-cp39-cp39-win_amd64.whl", hash = "sha256:42f0660bce31b745eb9d23f094a332d31f210dcadd0fc8e5be7e4c62a87ce86b"}, + {file = "grpcio-1.73.1.tar.gz", hash = "sha256:7fce2cd1c0c1116cf3850564ebfc3264fba75d3c74a7414373f1238ea365ef87"}, ] 
[package.extras] -protobuf = ["grpcio-tools (>=1.73.0)"] +protobuf = ["grpcio-tools (>=1.73.1)"] [[package]] name = "idna" @@ -696,14 +697,14 @@ files = [ [[package]] name = "ir-sim" -version = "2.5.4" +version = "2.5.5" description = "IR-SIM is an open-source, lightweight robot simulator based on Python, designed for robotics navigation, control, and learning. This simulator provides a simple and user-friendly framework for simulating robots, sensors, and environments, facilitating the development and testing of robotics algorithms with minimal hardware requirements." optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "ir_sim-2.5.4-py3-none-any.whl", hash = "sha256:a899f6da22ee53fb0ac150c9d2054d3de86e101e029c775a9265d3b49e9c6885"}, - {file = "ir_sim-2.5.4.tar.gz", hash = "sha256:30c17a2b1708f9096e19d60968cf7eb77f563008f52ed4b406462c501f76683c"}, + {file = "ir_sim-2.5.5-py3-none-any.whl", hash = "sha256:2c865da7390594ff939caa561e749d13a6ef2bc29190b3cced46be9b7821ed7d"}, + {file = "ir_sim-2.5.5.tar.gz", hash = "sha256:fe45ac921ba0e939a19d5f72c250d40721e4391f0f5fb51ce9770e74bcdb0e5f"}, ] [package.dependencies] @@ -1078,14 +1079,14 @@ pyyaml = ">=5.1" [[package]] name = "mkdocs-material" -version = "9.6.14" +version = "9.6.15" description = "Documentation that simply works" optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "mkdocs_material-9.6.14-py3-none-any.whl", hash = "sha256:3b9cee6d3688551bf7a8e8f41afda97a3c39a12f0325436d76c86706114b721b"}, - {file = "mkdocs_material-9.6.14.tar.gz", hash = "sha256:39d795e90dce6b531387c255bd07e866e027828b7346d3eba5ac3de265053754"}, + {file = "mkdocs_material-9.6.15-py3-none-any.whl", hash = "sha256:ac969c94d4fe5eb7c924b6d2f43d7db41159ea91553d18a9afc4780c34f2717a"}, + {file = "mkdocs_material-9.6.15.tar.gz", hash = "sha256:64adf8fa8dba1a17905b6aee1894a5aafd966d4aeb44a11088519b0f5ca4f1b5"}, ] [package.dependencies] @@ -1529,101 +1530,106 @@ files = [ [[package]] name = "pillow" -version = "11.2.1" +version = "11.3.0" description = "Python Imaging Library (Fork)" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "pillow-11.2.1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:d57a75d53922fc20c165016a20d9c44f73305e67c351bbc60d1adaf662e74047"}, - {file = "pillow-11.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:127bf6ac4a5b58b3d32fc8289656f77f80567d65660bc46f72c0d77e6600cc95"}, - {file = "pillow-11.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4ba4be812c7a40280629e55ae0b14a0aafa150dd6451297562e1764808bbe61"}, - {file = "pillow-11.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c8bd62331e5032bc396a93609982a9ab6b411c05078a52f5fe3cc59234a3abd1"}, - {file = "pillow-11.2.1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:562d11134c97a62fe3af29581f083033179f7ff435f78392565a1ad2d1c2c45c"}, - {file = "pillow-11.2.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:c97209e85b5be259994eb5b69ff50c5d20cca0f458ef9abd835e262d9d88b39d"}, - {file = "pillow-11.2.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:0c3e6d0f59171dfa2e25d7116217543310908dfa2770aa64b8f87605f8cacc97"}, - {file = "pillow-11.2.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cc1c3bc53befb6096b84165956e886b1729634a799e9d6329a0c512ab651e579"}, - {file = "pillow-11.2.1-cp310-cp310-win32.whl", hash = "sha256:312c77b7f07ab2139924d2639860e084ec2a13e72af54d4f08ac843a5fc9c79d"}, - {file = 
"pillow-11.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:9bc7ae48b8057a611e5fe9f853baa88093b9a76303937449397899385da06fad"}, - {file = "pillow-11.2.1-cp310-cp310-win_arm64.whl", hash = "sha256:2728567e249cdd939f6cc3d1f049595c66e4187f3c34078cbc0a7d21c47482d2"}, - {file = "pillow-11.2.1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:35ca289f712ccfc699508c4658a1d14652e8033e9b69839edf83cbdd0ba39e70"}, - {file = "pillow-11.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e0409af9f829f87a2dfb7e259f78f317a5351f2045158be321fd135973fff7bf"}, - {file = "pillow-11.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4e5c5edee874dce4f653dbe59db7c73a600119fbea8d31f53423586ee2aafd7"}, - {file = "pillow-11.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b93a07e76d13bff9444f1a029e0af2964e654bfc2e2c2d46bfd080df5ad5f3d8"}, - {file = "pillow-11.2.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:e6def7eed9e7fa90fde255afaf08060dc4b343bbe524a8f69bdd2a2f0018f600"}, - {file = "pillow-11.2.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:8f4f3724c068be008c08257207210c138d5f3731af6c155a81c2b09a9eb3a788"}, - {file = "pillow-11.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a0a6709b47019dff32e678bc12c63008311b82b9327613f534e496dacaefb71e"}, - {file = "pillow-11.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f6b0c664ccb879109ee3ca702a9272d877f4fcd21e5eb63c26422fd6e415365e"}, - {file = "pillow-11.2.1-cp311-cp311-win32.whl", hash = "sha256:cc5d875d56e49f112b6def6813c4e3d3036d269c008bf8aef72cd08d20ca6df6"}, - {file = "pillow-11.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:0f5c7eda47bf8e3c8a283762cab94e496ba977a420868cb819159980b6709193"}, - {file = "pillow-11.2.1-cp311-cp311-win_arm64.whl", hash = "sha256:4d375eb838755f2528ac8cbc926c3e31cc49ca4ad0cf79cff48b20e30634a4a7"}, - {file = "pillow-11.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:78afba22027b4accef10dbd5eed84425930ba41b3ea0a86fa8d20baaf19d807f"}, - {file = "pillow-11.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:78092232a4ab376a35d68c4e6d5e00dfd73454bd12b230420025fbe178ee3b0b"}, - {file = "pillow-11.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25a5f306095c6780c52e6bbb6109624b95c5b18e40aab1c3041da3e9e0cd3e2d"}, - {file = "pillow-11.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c7b29dbd4281923a2bfe562acb734cee96bbb129e96e6972d315ed9f232bef4"}, - {file = "pillow-11.2.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:3e645b020f3209a0181a418bffe7b4a93171eef6c4ef6cc20980b30bebf17b7d"}, - {file = "pillow-11.2.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:b2dbea1012ccb784a65349f57bbc93730b96e85b42e9bf7b01ef40443db720b4"}, - {file = "pillow-11.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:da3104c57bbd72948d75f6a9389e6727d2ab6333c3617f0a89d72d4940aa0443"}, - {file = "pillow-11.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:598174aef4589af795f66f9caab87ba4ff860ce08cd5bb447c6fc553ffee603c"}, - {file = "pillow-11.2.1-cp312-cp312-win32.whl", hash = "sha256:1d535df14716e7f8776b9e7fee118576d65572b4aad3ed639be9e4fa88a1cad3"}, - {file = "pillow-11.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:14e33b28bf17c7a38eede290f77db7c664e4eb01f7869e37fa98a5aa95978941"}, - {file = "pillow-11.2.1-cp312-cp312-win_arm64.whl", hash = "sha256:21e1470ac9e5739ff880c211fc3af01e3ae505859392bf65458c224d0bf283eb"}, - {file = 
"pillow-11.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:fdec757fea0b793056419bca3e9932eb2b0ceec90ef4813ea4c1e072c389eb28"}, - {file = "pillow-11.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b0e130705d568e2f43a17bcbe74d90958e8a16263868a12c3e0d9c8162690830"}, - {file = "pillow-11.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bdb5e09068332578214cadd9c05e3d64d99e0e87591be22a324bdbc18925be0"}, - {file = "pillow-11.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d189ba1bebfbc0c0e529159631ec72bb9e9bc041f01ec6d3233d6d82eb823bc1"}, - {file = "pillow-11.2.1-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:191955c55d8a712fab8934a42bfefbf99dd0b5875078240943f913bb66d46d9f"}, - {file = "pillow-11.2.1-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:ad275964d52e2243430472fc5d2c2334b4fc3ff9c16cb0a19254e25efa03a155"}, - {file = "pillow-11.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:750f96efe0597382660d8b53e90dd1dd44568a8edb51cb7f9d5d918b80d4de14"}, - {file = "pillow-11.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fe15238d3798788d00716637b3d4e7bb6bde18b26e5d08335a96e88564a36b6b"}, - {file = "pillow-11.2.1-cp313-cp313-win32.whl", hash = "sha256:3fe735ced9a607fee4f481423a9c36701a39719252a9bb251679635f99d0f7d2"}, - {file = "pillow-11.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:74ee3d7ecb3f3c05459ba95eed5efa28d6092d751ce9bf20e3e253a4e497e691"}, - {file = "pillow-11.2.1-cp313-cp313-win_arm64.whl", hash = "sha256:5119225c622403afb4b44bad4c1ca6c1f98eed79db8d3bc6e4e160fc6339d66c"}, - {file = "pillow-11.2.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:8ce2e8411c7aaef53e6bb29fe98f28cd4fbd9a1d9be2eeea434331aac0536b22"}, - {file = "pillow-11.2.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:9ee66787e095127116d91dea2143db65c7bb1e232f617aa5957c0d9d2a3f23a7"}, - {file = "pillow-11.2.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9622e3b6c1d8b551b6e6f21873bdcc55762b4b2126633014cea1803368a9aa16"}, - {file = "pillow-11.2.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63b5dff3a68f371ea06025a1a6966c9a1e1ee452fc8020c2cd0ea41b83e9037b"}, - {file = "pillow-11.2.1-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:31df6e2d3d8fc99f993fd253e97fae451a8db2e7207acf97859732273e108406"}, - {file = "pillow-11.2.1-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:062b7a42d672c45a70fa1f8b43d1d38ff76b63421cbbe7f88146b39e8a558d91"}, - {file = "pillow-11.2.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4eb92eca2711ef8be42fd3f67533765d9fd043b8c80db204f16c8ea62ee1a751"}, - {file = "pillow-11.2.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f91ebf30830a48c825590aede79376cb40f110b387c17ee9bd59932c961044f9"}, - {file = "pillow-11.2.1-cp313-cp313t-win32.whl", hash = "sha256:e0b55f27f584ed623221cfe995c912c61606be8513bfa0e07d2c674b4516d9dd"}, - {file = "pillow-11.2.1-cp313-cp313t-win_amd64.whl", hash = "sha256:36d6b82164c39ce5482f649b437382c0fb2395eabc1e2b1702a6deb8ad647d6e"}, - {file = "pillow-11.2.1-cp313-cp313t-win_arm64.whl", hash = "sha256:225c832a13326e34f212d2072982bb1adb210e0cc0b153e688743018c94a2681"}, - {file = "pillow-11.2.1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:7491cf8a79b8eb867d419648fff2f83cb0b3891c8b36da92cc7f1931d46108c8"}, - {file = "pillow-11.2.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8b02d8f9cb83c52578a0b4beadba92e37d83a4ef11570a8688bbf43f4ca50909"}, - {file = 
"pillow-11.2.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:014ca0050c85003620526b0ac1ac53f56fc93af128f7546623cc8e31875ab928"}, - {file = "pillow-11.2.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3692b68c87096ac6308296d96354eddd25f98740c9d2ab54e1549d6c8aea9d79"}, - {file = "pillow-11.2.1-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:f781dcb0bc9929adc77bad571b8621ecb1e4cdef86e940fe2e5b5ee24fd33b35"}, - {file = "pillow-11.2.1-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:2b490402c96f907a166615e9a5afacf2519e28295f157ec3a2bb9bd57de638cb"}, - {file = "pillow-11.2.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dd6b20b93b3ccc9c1b597999209e4bc5cf2853f9ee66e3fc9a400a78733ffc9a"}, - {file = "pillow-11.2.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:4b835d89c08a6c2ee7781b8dd0a30209a8012b5f09c0a665b65b0eb3560b6f36"}, - {file = "pillow-11.2.1-cp39-cp39-win32.whl", hash = "sha256:b10428b3416d4f9c61f94b494681280be7686bda15898a3a9e08eb66a6d92d67"}, - {file = "pillow-11.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:6ebce70c3f486acf7591a3d73431fa504a4e18a9b97ff27f5f47b7368e4b9dd1"}, - {file = "pillow-11.2.1-cp39-cp39-win_arm64.whl", hash = "sha256:c27476257b2fdcd7872d54cfd119b3a9ce4610fb85c8e32b70b42e3680a29a1e"}, - {file = "pillow-11.2.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:9b7b0d4fd2635f54ad82785d56bc0d94f147096493a79985d0ab57aedd563156"}, - {file = "pillow-11.2.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:aa442755e31c64037aa7c1cb186e0b369f8416c567381852c63444dd666fb772"}, - {file = "pillow-11.2.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0d3348c95b766f54b76116d53d4cb171b52992a1027e7ca50c81b43b9d9e363"}, - {file = "pillow-11.2.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85d27ea4c889342f7e35f6d56e7e1cb345632ad592e8c51b693d7b7556043ce0"}, - {file = "pillow-11.2.1-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:bf2c33d6791c598142f00c9c4c7d47f6476731c31081331664eb26d6ab583e01"}, - {file = "pillow-11.2.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e616e7154c37669fc1dfc14584f11e284e05d1c650e1c0f972f281c4ccc53193"}, - {file = "pillow-11.2.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:39ad2e0f424394e3aebc40168845fee52df1394a4673a6ee512d840d14ab3013"}, - {file = "pillow-11.2.1-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:80f1df8dbe9572b4b7abdfa17eb5d78dd620b1d55d9e25f834efdbee872d3aed"}, - {file = "pillow-11.2.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:ea926cfbc3957090becbcbbb65ad177161a2ff2ad578b5a6ec9bb1e1cd78753c"}, - {file = "pillow-11.2.1-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:738db0e0941ca0376804d4de6a782c005245264edaa253ffce24e5a15cbdc7bd"}, - {file = "pillow-11.2.1-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9db98ab6565c69082ec9b0d4e40dd9f6181dab0dd236d26f7a50b8b9bfbd5076"}, - {file = "pillow-11.2.1-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:036e53f4170e270ddb8797d4c590e6dd14d28e15c7da375c18978045f7e6c37b"}, - {file = "pillow-11.2.1-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:14f73f7c291279bd65fda51ee87affd7c1e097709f7fdd0188957a16c264601f"}, - {file = "pillow-11.2.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:208653868d5c9ecc2b327f9b9ef34e0e42a4cdd172c2988fd81d62d2bc9bc044"}, - {file = 
"pillow-11.2.1.tar.gz", hash = "sha256:a64dd61998416367b7ef979b73d3a85853ba9bec4c2925f74e588879a58716b6"}, + {file = "pillow-11.3.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1b9c17fd4ace828b3003dfd1e30bff24863e0eb59b535e8f80194d9cc7ecf860"}, + {file = "pillow-11.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:65dc69160114cdd0ca0f35cb434633c75e8e7fad4cf855177a05bf38678f73ad"}, + {file = "pillow-11.3.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f1f182ebd2303acf8c380a54f615ec883322593320a9b00438eb842c1f37ae50"}, + {file = "pillow-11.3.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4445fa62e15936a028672fd48c4c11a66d641d2c05726c7ec1f8ba6a572036ae"}, + {file = "pillow-11.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:71f511f6b3b91dd543282477be45a033e4845a40278fa8dcdbfdb07109bf18f9"}, + {file = "pillow-11.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:040a5b691b0713e1f6cbe222e0f4f74cd233421e105850ae3b3c0ceda520f42e"}, + {file = "pillow-11.3.0-cp310-cp310-win32.whl", hash = "sha256:89bd777bc6624fe4115e9fac3352c79ed60f3bb18651420635f26e643e3dd1f6"}, + {file = "pillow-11.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:19d2ff547c75b8e3ff46f4d9ef969a06c30ab2d4263a9e287733aa8b2429ce8f"}, + {file = "pillow-11.3.0-cp310-cp310-win_arm64.whl", hash = "sha256:819931d25e57b513242859ce1876c58c59dc31587847bf74cfe06b2e0cb22d2f"}, + {file = "pillow-11.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1cd110edf822773368b396281a2293aeb91c90a2db00d78ea43e7e861631b722"}, + {file = "pillow-11.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9c412fddd1b77a75aa904615ebaa6001f169b26fd467b4be93aded278266b288"}, + {file = "pillow-11.3.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:643f189248837533073c405ec2f0bb250ba54598cf80e8c1e043381a60632f58"}, + {file = "pillow-11.3.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:106064daa23a745510dabce1d84f29137a37224831d88eb4ce94bb187b1d7e5f"}, + {file = "pillow-11.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cd8ff254faf15591e724dc7c4ddb6bf4793efcbe13802a4ae3e863cd300b493e"}, + {file = "pillow-11.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:932c754c2d51ad2b2271fd01c3d121daaa35e27efae2a616f77bf164bc0b3e94"}, + {file = "pillow-11.3.0-cp311-cp311-win32.whl", hash = "sha256:b4b8f3efc8d530a1544e5962bd6b403d5f7fe8b9e08227c6b255f98ad82b4ba0"}, + {file = "pillow-11.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:1a992e86b0dd7aeb1f053cd506508c0999d710a8f07b4c791c63843fc6a807ac"}, + {file = "pillow-11.3.0-cp311-cp311-win_arm64.whl", hash = "sha256:30807c931ff7c095620fe04448e2c2fc673fcbb1ffe2a7da3fb39613489b1ddd"}, + {file = "pillow-11.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fdae223722da47b024b867c1ea0be64e0df702c5e0a60e27daad39bf960dd1e4"}, + {file = "pillow-11.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:921bd305b10e82b4d1f5e802b6850677f965d8394203d182f078873851dada69"}, + {file = "pillow-11.3.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:97f07ed9f56a3b9b5f49d3661dc9607484e85c67e27f3e8be2c7d28ca032fec7"}, + {file = "pillow-11.3.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:676b2815362456b5b3216b4fd5bd89d362100dc6f4945154ff172e206a22c024"}, + {file = "pillow-11.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3e184b2f26ff146363dd07bde8b711833d7b0202e27d13540bfe2e35a323a809"}, + 
{file = "pillow-11.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6be31e3fc9a621e071bc17bb7de63b85cbe0bfae91bb0363c893cbe67247780d"}, + {file = "pillow-11.3.0-cp312-cp312-win32.whl", hash = "sha256:7b161756381f0918e05e7cb8a371fff367e807770f8fe92ecb20d905d0e1c149"}, + {file = "pillow-11.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:a6444696fce635783440b7f7a9fc24b3ad10a9ea3f0ab66c5905be1c19ccf17d"}, + {file = "pillow-11.3.0-cp312-cp312-win_arm64.whl", hash = "sha256:2aceea54f957dd4448264f9bf40875da0415c83eb85f55069d89c0ed436e3542"}, + {file = "pillow-11.3.0-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:1c627742b539bba4309df89171356fcb3cc5a9178355b2727d1b74a6cf155fbd"}, + {file = "pillow-11.3.0-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:30b7c02f3899d10f13d7a48163c8969e4e653f8b43416d23d13d1bbfdc93b9f8"}, + {file = "pillow-11.3.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:7859a4cc7c9295f5838015d8cc0a9c215b77e43d07a25e460f35cf516df8626f"}, + {file = "pillow-11.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ec1ee50470b0d050984394423d96325b744d55c701a439d2bd66089bff963d3c"}, + {file = "pillow-11.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7db51d222548ccfd274e4572fdbf3e810a5e66b00608862f947b163e613b67dd"}, + {file = "pillow-11.3.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c37d8ba9411d6003bba9e518db0db0c58a680ab9fe5179f040b0463644bc9805"}, + {file = "pillow-11.3.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13f87d581e71d9189ab21fe0efb5a23e9f28552d5be6979e84001d3b8505abe8"}, + {file = "pillow-11.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:023f6d2d11784a465f09fd09a34b150ea4672e85fb3d05931d89f373ab14abb2"}, + {file = "pillow-11.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:45dfc51ac5975b938e9809451c51734124e73b04d0f0ac621649821a63852e7b"}, + {file = "pillow-11.3.0-cp313-cp313-win32.whl", hash = "sha256:a4d336baed65d50d37b88ca5b60c0fa9d81e3a87d4a7930d3880d1624d5b31f3"}, + {file = "pillow-11.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0bce5c4fd0921f99d2e858dc4d4d64193407e1b99478bc5cacecba2311abde51"}, + {file = "pillow-11.3.0-cp313-cp313-win_arm64.whl", hash = "sha256:1904e1264881f682f02b7f8167935cce37bc97db457f8e7849dc3a6a52b99580"}, + {file = "pillow-11.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4c834a3921375c48ee6b9624061076bc0a32a60b5532b322cc0ea64e639dd50e"}, + {file = "pillow-11.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5e05688ccef30ea69b9317a9ead994b93975104a677a36a8ed8106be9260aa6d"}, + {file = "pillow-11.3.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1f85acb69adf2aaee8b7da124efebbdb959a104db34d3a2cb0f3793dbae422a8"}, + {file = "pillow-11.3.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:05f6ecbeff5005399bb48d198f098a9b4b6bdf27b8487c7f38ca16eeb070cd59"}, + {file = "pillow-11.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a7bc6e6fd0395bc052f16b1a8670859964dbd7003bd0af2ff08342eb6e442cfe"}, + {file = "pillow-11.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:83e1b0161c9d148125083a35c1c5a89db5b7054834fd4387499e06552035236c"}, + {file = "pillow-11.3.0-cp313-cp313t-win32.whl", hash = "sha256:2a3117c06b8fb646639dce83694f2f9eac405472713fcb1ae887469c0d4f6788"}, + {file = "pillow-11.3.0-cp313-cp313t-win_amd64.whl", hash = "sha256:857844335c95bea93fb39e0fa2726b4d9d758850b34075a7e3ff4f4fa3aa3b31"}, + 
{file = "pillow-11.3.0-cp313-cp313t-win_arm64.whl", hash = "sha256:8797edc41f3e8536ae4b10897ee2f637235c94f27404cac7297f7b607dd0716e"}, + {file = "pillow-11.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:d9da3df5f9ea2a89b81bb6087177fb1f4d1c7146d583a3fe5c672c0d94e55e12"}, + {file = "pillow-11.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:0b275ff9b04df7b640c59ec5a3cb113eefd3795a8df80bac69646ef699c6981a"}, + {file = "pillow-11.3.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41742638139424703b4d01665b807c6468e23e699e8e90cffefe291c5832b027"}, + {file = "pillow-11.3.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:93efb0b4de7e340d99057415c749175e24c8864302369e05914682ba642e5d77"}, + {file = "pillow-11.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7966e38dcd0fa11ca390aed7c6f20454443581d758242023cf36fcb319b1a874"}, + {file = "pillow-11.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:98a9afa7b9007c67ed84c57c9e0ad86a6000da96eaa638e4f8abe5b65ff83f0a"}, + {file = "pillow-11.3.0-cp314-cp314-win32.whl", hash = "sha256:02a723e6bf909e7cea0dac1b0e0310be9d7650cd66222a5f1c571455c0a45214"}, + {file = "pillow-11.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:a418486160228f64dd9e9efcd132679b7a02a5f22c982c78b6fc7dab3fefb635"}, + {file = "pillow-11.3.0-cp314-cp314-win_arm64.whl", hash = "sha256:155658efb5e044669c08896c0c44231c5e9abcaadbc5cd3648df2f7c0b96b9a6"}, + {file = "pillow-11.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:59a03cdf019efbfeeed910bf79c7c93255c3d54bc45898ac2a4140071b02b4ae"}, + {file = "pillow-11.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f8a5827f84d973d8636e9dc5764af4f0cf2318d26744b3d902931701b0d46653"}, + {file = "pillow-11.3.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4c96f993ab8c98460cd0c001447bff6194403e8b1d7e149ade5f00594918128b"}, + {file = "pillow-11.3.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41342b64afeba938edb034d122b2dda5db2139b9a4af999729ba8818e0056477"}, + {file = "pillow-11.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:068d9c39a2d1b358eb9f245ce7ab1b5c3246c7c8c7d9ba58cfa5b43146c06e50"}, + {file = "pillow-11.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:a1bc6ba083b145187f648b667e05a2534ecc4b9f2784c2cbe3089e44868f2b9b"}, + {file = "pillow-11.3.0-cp314-cp314t-win32.whl", hash = "sha256:118ca10c0d60b06d006be10a501fd6bbdfef559251ed31b794668ed569c87e12"}, + {file = "pillow-11.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:8924748b688aa210d79883357d102cd64690e56b923a186f35a82cbc10f997db"}, + {file = "pillow-11.3.0-cp314-cp314t-win_arm64.whl", hash = "sha256:79ea0d14d3ebad43ec77ad5272e6ff9bba5b679ef73375ea760261207fa8e0aa"}, + {file = "pillow-11.3.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:48d254f8a4c776de343051023eb61ffe818299eeac478da55227d96e241de53f"}, + {file = "pillow-11.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7aee118e30a4cf54fdd873bd3a29de51e29105ab11f9aad8c32123f58c8f8081"}, + {file = "pillow-11.3.0-cp39-cp39-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:092c80c76635f5ecb10f3f83d76716165c96f5229addbd1ec2bdbbda7d496e06"}, + {file = "pillow-11.3.0-cp39-cp39-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cadc9e0ea0a2431124cde7e1697106471fc4c1da01530e679b2391c37d3fbb3a"}, + {file = "pillow-11.3.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = 
"sha256:6a418691000f2a418c9135a7cf0d797c1bb7d9a485e61fe8e7722845b95ef978"}, + {file = "pillow-11.3.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:97afb3a00b65cc0804d1c7abddbf090a81eaac02768af58cbdcaaa0a931e0b6d"}, + {file = "pillow-11.3.0-cp39-cp39-win32.whl", hash = "sha256:ea944117a7974ae78059fcc1800e5d3295172bb97035c0c1d9345fca1419da71"}, + {file = "pillow-11.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:e5c5858ad8ec655450a7c7df532e9842cf8df7cc349df7225c60d5d348c8aada"}, + {file = "pillow-11.3.0-cp39-cp39-win_arm64.whl", hash = "sha256:6abdbfd3aea42be05702a8dd98832329c167ee84400a1d1f61ab11437f1717eb"}, + {file = "pillow-11.3.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:3cee80663f29e3843b68199b9d6f4f54bd1d4a6b59bdd91bceefc51238bcb967"}, + {file = "pillow-11.3.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b5f56c3f344f2ccaf0dd875d3e180f631dc60a51b314295a3e681fe8cf851fbe"}, + {file = "pillow-11.3.0-pp310-pypy310_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:527b37216b6ac3a12d7838dc3bd75208ec57c1c6d11ef01902266a5a0c14fc27"}, + {file = "pillow-11.3.0-pp310-pypy310_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:be5463ac478b623b9dd3937afd7fb7ab3d79dd290a28e2b6df292dc75063eb8a"}, + {file = "pillow-11.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:8dc70ca24c110503e16918a658b869019126ecfe03109b754c402daff12b3d9f"}, + {file = "pillow-11.3.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7c8ec7a017ad1bd562f93dbd8505763e688d388cde6e4a010ae1486916e713e6"}, + {file = "pillow-11.3.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:9ab6ae226de48019caa8074894544af5b53a117ccb9d3b3dcb2871464c829438"}, + {file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5418b53c0d59b3824d05e029669efa023bbef0f3e92e75ec8428f3799487f361"}, + {file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:504b6f59505f08ae014f724b6207ff6222662aab5cc9542577fb084ed0676ac7"}, + {file = "pillow-11.3.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:c84d689db21a1c397d001aa08241044aa2069e7587b398c8cc63020390b1c1b8"}, + {file = "pillow-11.3.0.tar.gz", hash = "sha256:3828ee7586cd0b2091b6209e5ad53e20d0649bbe87164a459d0676e035e8f523"}, ] [package.extras] -docs = ["furo", "olefile", "sphinx (>=8.2)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"] +docs = ["furo", "olefile", "sphinx (>=8.2)", "sphinx-autobuild", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"] fpx = ["olefile"] mic = ["olefile"] test-arrow = ["pyarrow"] -tests = ["check-manifest", "coverage (>=7.4.2)", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout", "trove-classifiers (>=2024.10.12)"] +tests = ["check-manifest", "coverage (>=7.4.2)", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "trove-classifiers (>=2024.10.12)"] typing = ["typing-extensions"] xmp = ["defusedxml"] @@ -1705,14 +1711,14 @@ test = ["pytest", "pytest-xdist", "setuptools"] [[package]] name = "pygments" -version = "2.19.1" +version = "2.19.2" description = "Pygments is a syntax highlighting package written in Python." 
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
- {file = "pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c"},
- {file = "pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f"},
+ {file = "pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b"},
+ {file = "pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887"},
]

[package.extras]
@@ -1720,14 +1726,14 @@ windows-terminal = ["colorama (>=0.4.6)"]

[[package]]
name = "pymdown-extensions"
-version = "10.15"
+version = "10.16"
description = "Extension pack for Python Markdown."
optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "pymdown_extensions-10.15-py3-none-any.whl", hash = "sha256:46e99bb272612b0de3b7e7caf6da8dd5f4ca5212c0b273feb9304e236c484e5f"},
- {file = "pymdown_extensions-10.15.tar.gz", hash = "sha256:0e5994e32155f4b03504f939e501b981d306daf7ec2aa1cd2eb6bd300784f8f7"},
+ {file = "pymdown_extensions-10.16-py3-none-any.whl", hash = "sha256:f5dd064a4db588cb2d95229fc4ee63a1b16cc8b4d0e6145c0899ed8723da1df2"},
+ {file = "pymdown_extensions-10.16.tar.gz", hash = "sha256:71dac4fca63fabeffd3eb9038b756161a33ec6e8d230853d3cecf562155ab3de"},
]

[package.dependencies]
diff --git a/robot_nav/models/MARL/hsAttention.py b/robot_nav/models/MARL/hardsoftAttention.py
similarity index 100%
rename from robot_nav/models/MARL/hsAttention.py
rename to robot_nav/models/MARL/hardsoftAttention.py
diff --git a/robot_nav/models/MARL/marlTD3.py b/robot_nav/models/MARL/marlTD3.py
index 39e4b44..9fd4508 100644
--- a/robot_nav/models/MARL/marlTD3.py
+++ b/robot_nav/models/MARL/marlTD3.py
@@ -7,7 +7,7 @@ from numpy import inf
from torch.utils.tensorboard import SummaryWriter

-from robot_nav.models.MARL.hsAttention import Attention
+from robot_nav.models.MARL.hardsoftAttention import Attention


class Actor(nn.Module):
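
Note on the last two diffs: hsAttention.py is renamed to hardsoftAttention.py with 100% similarity, and the only code change in marlTD3.py is the matching import path, so the Attention class itself is untouched. A quick way to confirm the rename landed cleanly is an import smoke test. The snippet below is a sketch, not part of this patch: it assumes the repository root is on PYTHONPATH, and embedding_dim=48 is an illustrative value consistent with the encoder width used in this patch series.

    # Hypothetical smoke test (not part of the patch): confirm the Attention
    # class still resolves from its renamed module.
    from robot_nav.models.MARL.hardsoftAttention import Attention

    att = Attention(embedding_dim=48)  # illustrative embedding size
    print(att)  # printing the layer tree confirms construction succeeded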