
Commit ab18283

Author: Ervin T

[release] Fix rl trainer warning (#5144)

* Fix rl trainer warning
* Fix typo

1 parent 7868faa · commit ab18283

File tree

2 files changed: +33 −1 lines changed

ml-agents/mlagents/trainers/tests/test_rl_trainer.py

Lines changed: 32 additions & 0 deletions

@@ -1,4 +1,5 @@
 import os
+import unittest
 from unittest import mock
 import pytest
 import mlagents.trainers.tests.mock_brain as mb
@@ -178,3 +179,34 @@ def test_summary_checkpoint(mock_add_checkpoint, mock_write_summary):
         for step in checkpoint_range
     ]
     mock_add_checkpoint.assert_has_calls(add_checkpoint_calls)
+
+
+class RLTrainerWarningTest(unittest.TestCase):
+    def test_warning_group_reward(self):
+        with self.assertLogs("mlagents.trainers", level="WARN") as cm:
+            rl_trainer = create_rl_trainer()
+            # This one should warn
+            trajectory = mb.make_fake_trajectory(
+                length=10,
+                observation_specs=create_observation_specs_with_shapes([(1,)]),
+                max_step_complete=True,
+                action_spec=ActionSpec.create_discrete((2,)),
+                group_reward=1.0,
+            )
+            buff = trajectory.to_agentbuffer()
+            rl_trainer._warn_if_group_reward(buff)
+            assert len(cm.output) > 0
+            len_of_first_warning = len(cm.output)
+
+            rl_trainer = create_rl_trainer()
+            # This one shouldn't
+            trajectory = mb.make_fake_trajectory(
+                length=10,
+                observation_specs=create_observation_specs_with_shapes([(1,)]),
+                max_step_complete=True,
+                action_spec=ActionSpec.create_discrete((2,)),
+            )
+            buff = trajectory.to_agentbuffer()
+            rl_trainer._warn_if_group_reward(buff)
+            # Make sure warnings don't get bigger
+            assert len(cm.output) == len_of_first_warning
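The new test leans on unittest's assertLogs to capture warnings emitted through the "mlagents.trainers" logger, together with the trainer's _has_warned_group_rewards flag to keep the warning from repeating. Below is a minimal, self-contained sketch of that pattern, using a stand-in logger and a local flag so it runs without ML-Agents installed; all names in it are illustrative, not part of the library.

import logging
import unittest


class AssertLogsSketch(unittest.TestCase):
    """Illustrative only: mimics the capture pattern used by RLTrainerWarningTest."""

    def test_warn_once(self):
        logger = logging.getLogger("example.trainers")  # stand-in for "mlagents.trainers"
        has_warned = False  # plays the role of _has_warned_group_rewards

        def warn_if_group_reward(group_rewards):
            # Warn at most once, and only if a nonzero group reward is present.
            nonlocal has_warned
            if not has_warned:
                if any(r != 0 for r in group_rewards):
                    logger.warning("Group Reward seen outside a multi-agent trainer.")
                    has_warned = True

        with self.assertLogs("example.trainers", level="WARN") as cm:
            warn_if_group_reward([0.0, 1.0, 0.0])  # nonzero group reward -> warns
            warn_if_group_reward([0.0, 1.0, 0.0])  # flag already set -> silent
        self.assertEqual(len(cm.output), 1)


if __name__ == "__main__":
    unittest.main()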

ml-agents/mlagents/trainers/trainer/rl_trainer.py

Lines changed: 1 addition & 1 deletion

@@ -263,7 +263,7 @@ def _warn_if_group_reward(self, buffer: AgentBuffer) -> None:
         Warn if the trainer receives a Group Reward but isn't a multiagent trainer (e.g. POCA).
         """
         if not self._has_warned_group_rewards:
-            if not np.any(buffer[BufferKey.GROUP_REWARD]):
+            if np.any(buffer[BufferKey.GROUP_REWARD]):
                 logger.warning(
                     "An agent recieved a Group Reward, but you are not using a multi-agent trainer. "
                     "Please use the POCA trainer for best results."
