1+ import logging
12import gym
23import numpy as np
34from mlagents .envs import UnityEnvironment
4- from gym import error , spaces , logger
5+ from gym import error , spaces
56
67
78class UnityGymException (error .Error ):
@@ -11,6 +12,10 @@ class UnityGymException(error.Error):
1112 pass
1213
1314
# Module-level logger used by the UnityEnv wrapper (e.g. in _check_agents).
# NOTE(review): calling logging.basicConfig at import time configures the
# host application's *root* logger as a side effect; a library would normally
# only call logging.getLogger and leave configuration to the application —
# left as-is here because downstream users may rely on INFO output appearing
# by default.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("gym_unity")
1419class UnityEnv (gym .Env ):
1520 """
1621 Provides Gym wrapper for Unity Learning Environments.
@@ -44,7 +49,11 @@ def __init__(self, environment_filename: str, worker_id=0, use_visual=False, mul
4449 if use_visual and brain .number_visual_observations == 0 :
4550 raise UnityGymException ("`use_visual` was set to True, however there are no"
4651 " visual observations as part of this environment." )
47- self .use_visual = brain .number_visual_observations >= 1 and use_visual
52+ self .use_visual = brain .number_visual_observations >= 1 and use_visual
53+
54+ if brain .number_visual_observations > 1 :
55+ logger .warning ("The environment contains more than one visual observation. "
56+ "Please note that only the first will be provided in the observation." )
4857
4958 if brain .num_stacked_vector_observations != 1 :
5059 raise UnityGymException (
@@ -114,7 +123,8 @@ def step(self, action):
114123 if not isinstance (action , list ):
115124 raise UnityGymException ("The environment was expecting `action` to be a list." )
116125 if len (action ) != self ._n_agents :
117- raise UnityGymException ("The environment was expecting a list of {} actions." .format (self ._n_agents ))
126+ raise UnityGymException (
127+ "The environment was expecting a list of {} actions." .format (self ._n_agents ))
118128 else :
119129 action = np .array (action )
120130
@@ -136,17 +146,19 @@ def _single_step(self, info):
136146 else :
137147 default_observation = info .vector_observations [0 , :]
138148
139- return default_observation , info .rewards [0 ], info .local_done [0 ], {"text_observation" : info .text_observations [0 ],
140- "brain_info" : info }
149+ return default_observation , info .rewards [0 ], info .local_done [0 ], {
150+ "text_observation" : info .text_observations [0 ],
151+ "brain_info" : info }
141152
142153 def _multi_step (self , info ):
143154 if self .use_visual :
144155 self .visual_obs = info .visual_observations
145156 default_observation = self .visual_obs
146157 else :
147158 default_observation = info .vector_observations
148- return list (default_observation ), info .rewards , info .local_done , {"text_observation" : info .text_observations ,
149- "brain_info" : info }
159+ return list (default_observation ), info .rewards , info .local_done , {
160+ "text_observation" : info .text_observations ,
161+ "brain_info" : info }
150162
151163 def render (self , mode = 'rgb_array' ):
152164 return self .visual_obs
@@ -170,11 +182,13 @@ def seed(self, seed=None):
170182
171183 def _check_agents (self , n_agents ):
172184 if not self ._multiagent and n_agents > 1 :
173- raise UnityGymException ("The environment was launched as a single-agent environment, however"
174- "there is more than one agent in the scene." )
185+ raise UnityGymException (
186+ "The environment was launched as a single-agent environment, however"
187+ "there is more than one agent in the scene." )
175188 elif self ._multiagent and n_agents <= 1 :
176- raise UnityGymException ("The environment was launched as a mutli-agent environment, however"
177- "there is only one agent in the scene." )
189+ raise UnityGymException (
190+ "The environment was launched as a mutli-agent environment, however"
191+ "there is only one agent in the scene." )
178192 if self ._n_agents is None :
179193 self ._n_agents = n_agents
180194 logger .info ("{} agents within environment." .format (n_agents ))
0 commit comments