2 changes: 1 addition & 1 deletion src/overcooked_ai_py/agents/agent.py
@@ -445,7 +445,7 @@ def get_lowest_cost_action_and_goal(self, start_pos_and_or, motion_goals):
         Chooses motion goal that has the lowest cost action plan.
         Returns the motion goal itself and the first action on the plan.
         """
-        min_cost = np.Inf
+        min_cost = np.inf
         best_action, best_goal = None, None
         for goal in motion_goals:
            action_plan, _, plan_cost = self.mlam.motion_planner.get_plan(
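All four files in this PR make the same NumPy compatibility fix: the capitalized alias np.Inf was removed in NumPy 2.0 (and np.int in NumPy 1.24), while lowercase np.inf works on every version. The hunk above uses np.inf as the starting sentinel for a min-scan over motion goals; a minimal sketch of that pattern, with plan_cost as a hypothetical stand-in for the self.mlam.motion_planner.get_plan cost lookup:

    import numpy as np

    def lowest_cost_goal(motion_goals, plan_cost):
        # np.Inf raises AttributeError on NumPy >= 2.0; np.inf is the
        # supported spelling. Starting from an infinite sentinel means the
        # first reachable goal always replaces it.
        min_cost, best_goal = np.inf, None
        for goal in motion_goals:
            cost = plan_cost(goal)
            if cost < min_cost:
                min_cost, best_goal = cost, goal
        return best_goal, min_cost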
2 changes: 1 addition & 1 deletion src/overcooked_ai_py/mdp/layout_generator.py
@@ -399,7 +399,7 @@ def get_random_starting_positions(self, grid, divider_x=None):
 class Grid(object):
     def __init__(self, shape):
         assert len(shape) == 2, "Grid must be 2 dimensional"
-        grid = (np.ones(shape) * TYPE_TO_CODE[COUNTER]).astype(np.int)
+        grid = (np.ones(shape) * TYPE_TO_CODE[COUNTER]).astype(int)
         self.mtx = grid
         self.shape = np.array(shape)
         self.width = shape[0]
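Unlike np.Inf, np.int was never a distinct type: it aliased the Python builtin int and was removed in NumPy 1.24, so .astype(np.int) now raises AttributeError. Passing the builtin is equivalent on all versions. A small sketch, with 7 as a made-up stand-in for TYPE_TO_CODE[COUNTER]:

    import numpy as np

    # .astype(int) resolves to the platform default integer dtype (np.int_),
    # exactly what .astype(np.int) produced before the alias was removed.
    grid = (np.ones((5, 4)) * 7).astype(int)
    assert grid.dtype == np.int_

If a fixed width mattered here, np.int64 would be the explicit replacement; the builtin keeps the original platform-dependent behavior.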
4 changes: 2 additions & 2 deletions src/overcooked_ai_py/mdp/overcooked_env.py
@@ -430,7 +430,7 @@ def run_agents(
         display=False,
         dir=None,
         display_phi=False,
-        display_until=np.Inf,
+        display_until=np.inf,
     ):
         """
         Trajectory returned will a list of state-action pairs (s_t, joint_a_t, r_t, done_t, info_t).
@@ -491,7 +491,7 @@ def get_rollouts(
         dir=None,
         final_state=False,
         display_phi=False,
-        display_until=np.Inf,
+        display_until=np.inf,
         metadata_fn=None,
         metadata_info_fn=None,
         info=True,
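In both signatures np.inf is an "unbounded" default: any finite timestep index compares less than it, so rendering never cuts off unless the caller passes a finite display_until. A hypothetical reduction of that pattern (not the real env API):

    import numpy as np

    def rollout_with_display(num_steps, display_until=np.inf):
        # With the infinite default, t < display_until holds for every step,
        # so the whole rollout is displayed; a finite cutoff truncates it.
        for t in range(num_steps):
            if t < display_until:
                print(f"displaying step {t}")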
14 changes: 7 additions & 7 deletions src/overcooked_ai_py/planning/planners.py
@@ -165,7 +165,7 @@ def get_gridworld_pos_distance(self, pos1, pos2):
         to go from starting position to goal position (not including
         interaction action)."""
         # NOTE: currently unused, pretty bad code. If used in future, clean up
-        min_cost = np.Inf
+        min_cost = np.inf
         for d1, d2 in itertools.product(Direction.ALL_DIRECTIONS, repeat=2):
             start = (pos1, d1)
             end = (pos2, d2)
@@ -364,8 +364,8 @@ def min_cost_between_features(
         Determines the minimum number of timesteps necessary for a player to go from any
         terrain feature in list1 to any feature in list2 and perform an interact action
         """
-        min_dist = np.Inf
-        min_manhattan = np.Inf
+        min_dist = np.inf
+        min_manhattan = np.inf
         for pos1, pos2 in itertools.product(pos_list1, pos_list2):
             for mg1, mg2 in itertools.product(
                 self.motion_goals_for_pos[pos1],
@@ -383,7 +383,7 @@ def min_cost_between_features(
                     min_dist = curr_dist

         # +1 to account for interaction action
-        if manhattan_if_fail and min_dist == np.Inf:
+        if manhattan_if_fail and min_dist == np.inf:
             min_dist = min_manhattan
         min_cost = min_dist + 1
         return min_cost
@@ -401,7 +401,7 @@ def min_cost_to_feature(
         """
         start_pos = start_pos_and_or[0]
         assert self.mdp.get_terrain_type_at_pos(start_pos) != "X"
-        min_dist = np.Inf
+        min_dist = np.inf
         best_feature = None
         for feature_pos in feature_pos_list:
             for feature_goal in self.motion_goals_for_pos[feature_pos]:
@@ -841,7 +841,7 @@ def _handle_conflict_with_same_goal_idx(
                 if self._agents_are_in_same_position(
                     (curr_pos_or0, curr_pos_or1)
                 ):
-                    return None, None, [np.Inf, np.Inf]
+                    return None, None, [np.inf, np.inf]

                 else:
                     curr_pos_or0, curr_pos_or1 = next_pos_or0, next_pos_or1
@@ -860,7 +860,7 @@ def _handle_conflict_with_same_goal_idx(

         end_pos_and_or = (curr_pos_or0, curr_pos_or1)
         finishing_times = (
-            (np.Inf, idx1) if wait_agent_idx == 0 else (idx0, np.Inf)
+            (np.inf, idx1) if wait_agent_idx == 0 else (idx0, np.inf)
         )
         return joint_plan, end_pos_and_or, finishing_times
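The planners rely on the same sentinel throughout: min_cost_between_features scans all feature pairs starting from np.inf and, if nothing was reachable (min_dist == np.inf), optionally falls back to the best Manhattan distance, while _handle_conflict_with_same_goal_idx uses np.inf finishing times to mark a waiting agent or an unresolvable joint plan. A hedged sketch of the fallback logic, with plan_cost as a hypothetical stand-in for the motion planner's pairwise cost (returning np.inf when unreachable):

    import itertools

    import numpy as np

    def min_cost_between(pos_list1, pos_list2, plan_cost, manhattan_if_fail=False):
        min_dist = np.inf
        min_manhattan = np.inf
        for pos1, pos2 in itertools.product(pos_list1, pos_list2):
            manhattan = abs(pos1[0] - pos2[0]) + abs(pos1[1] - pos2[1])
            min_manhattan = min(min_manhattan, manhattan)
            min_dist = min(min_dist, plan_cost(pos1, pos2))
        if manhattan_if_fail and min_dist == np.inf:
            min_dist = min_manhattan
        return min_dist + 1  # +1 accounts for the interact action

    # With an unreachable pair, the Manhattan lower bound takes over:
    # min_cost_between([(0, 0)], [(3, 4)], lambda a, b: np.inf,
    #                  manhattan_if_fail=True) == 8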