Skip to content

Commit 1a012b7

Browse files
committed
Update all usages of np.Inf to np.inf.
Users may get an AttributeError when using np.Inf with newer NumPy versions. This should be a non-breaking change since np.inf is available in older NumPy versions (and is already used across this repo).
1 parent 9dfc1e8 commit 1a012b7

File tree

3 files changed

+10
-10
lines changed

3 files changed

+10
-10
lines changed

src/overcooked_ai_py/agents/agent.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -445,7 +445,7 @@ def get_lowest_cost_action_and_goal(self, start_pos_and_or, motion_goals):
445445
Chooses motion goal that has the lowest cost action plan.
446446
Returns the motion goal itself and the first action on the plan.
447447
"""
448-
min_cost = np.Inf
448+
min_cost = np.inf
449449
best_action, best_goal = None, None
450450
for goal in motion_goals:
451451
action_plan, _, plan_cost = self.mlam.motion_planner.get_plan(

src/overcooked_ai_py/mdp/overcooked_env.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -424,7 +424,7 @@ def execute_plan(self, start_state, joint_action_plan, display=False):
424424
return successor_state, done
425425

426426
def run_agents(
427-
self,
427+
self,
428428
agent_pair,
429429
include_final_state=False,
430430
display=False,
@@ -485,7 +485,7 @@ def run_agents(
485485

486486
def get_rollouts(
487487
self,
488-
agent_pair,
488+
agent_pair,
489489
num_games,
490490
display=False,
491491
dir=None,

src/overcooked_ai_py/planning/planners.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -165,7 +165,7 @@ def get_gridworld_pos_distance(self, pos1, pos2):
165165
to go from starting position to goal position (not including
166166
interaction action)."""
167167
# NOTE: currently unused, pretty bad code. If used in future, clean up
168-
min_cost = np.Inf
168+
min_cost = np.inf
169169
for d1, d2 in itertools.product(Direction.ALL_DIRECTIONS, repeat=2):
170170
start = (pos1, d1)
171171
end = (pos2, d2)
@@ -364,8 +364,8 @@ def min_cost_between_features(
364364
Determines the minimum number of timesteps necessary for a player to go from any
365365
terrain feature in list1 to any feature in list2 and perform an interact action
366366
"""
367-
min_dist = np.Inf
368-
min_manhattan = np.Inf
367+
min_dist = np.inf
368+
min_manhattan = np.inf
369369
for pos1, pos2 in itertools.product(pos_list1, pos_list2):
370370
for mg1, mg2 in itertools.product(
371371
self.motion_goals_for_pos[pos1],
@@ -383,7 +383,7 @@ def min_cost_between_features(
383383
min_dist = curr_dist
384384

385385
# +1 to account for interaction action
386-
if manhattan_if_fail and min_dist == np.Inf:
386+
if manhattan_if_fail and min_dist == np.inf:
387387
min_dist = min_manhattan
388388
min_cost = min_dist + 1
389389
return min_cost
@@ -401,7 +401,7 @@ def min_cost_to_feature(
401401
"""
402402
start_pos = start_pos_and_or[0]
403403
assert self.mdp.get_terrain_type_at_pos(start_pos) != "X"
404-
min_dist = np.Inf
404+
min_dist = np.inf
405405
best_feature = None
406406
for feature_pos in feature_pos_list:
407407
for feature_goal in self.motion_goals_for_pos[feature_pos]:
@@ -841,7 +841,7 @@ def _handle_conflict_with_same_goal_idx(
841841
if self._agents_are_in_same_position(
842842
(curr_pos_or0, curr_pos_or1)
843843
):
844-
return None, None, [np.Inf, np.Inf]
844+
return None, None, [np.inf, np.inf]
845845

846846
else:
847847
curr_pos_or0, curr_pos_or1 = next_pos_or0, next_pos_or1
@@ -860,7 +860,7 @@ def _handle_conflict_with_same_goal_idx(
860860

861861
end_pos_and_or = (curr_pos_or0, curr_pos_or1)
862862
finishing_times = (
863-
(np.Inf, idx1) if wait_agent_idx == 0 else (idx0, np.Inf)
863+
(np.inf, idx1) if wait_agent_idx == 0 else (idx0, np.inf)
864864
)
865865
return joint_plan, end_pos_and_or, finishing_times
866866

0 commit comments

Comments
 (0)