import logging

import gym
import numpy as np
# UnityEnvironment is exposed from `mlagents.envs` in recent ml-agents releases;
# older releases shipped it in the `unityagents` package instead.
from mlagents.envs import UnityEnvironment
from gym import error, spaces


class UnityGymException(error.Error):
    """
    Any error related to the gym wrapper of ml-agents.
    """
    pass


logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("gym_unity")


class UnityEnv(gym.Env):
    """
    Provides a gym wrapper around a Unity Learning Environment.
    In multi-agent mode, observations, rewards and done flags are exchanged as
    lists with one entry per agent.
    """

    def __init__(self, environment_filename, worker_id=0, use_visual=False, multiagent=False):
        """
        :param environment_filename: Path of the compiled Unity environment to wrap.
        :param worker_id: Worker number used to open the environment.
        :param use_visual: Whether to use visual observations instead of vector observations.
        :param multiagent: Whether to run in multi-agent mode.
        """
        self._env = UnityEnvironment(environment_filename, worker_id)
        self.visual_obs = None
        self._n_agents = None
        self._multiagent = multiagent
        self.brain_name = self._env.external_brain_names[0]
        brain = self._env.brains[self.brain_name]

        if use_visual and brain.number_visual_observations == 0:
            raise UnityGymException("`use_visual` was set to True, however there are no"
                                    " visual observations as part of this environment.")
        self.use_visual = brain.number_visual_observations >= 1 and use_visual

        if brain.number_visual_observations > 1:
            logger.warning("The environment contains more than one visual observation. "
                           "Please note that only the first will be provided in the observation.")

        if brain.num_stacked_vector_observations != 1:
            raise UnityGymException(
                "There can only be one stacked vector observation in a UnityEnvironment "
                "if it is wrapped in a gym.")

    def step(self, action):
        if self._multiagent:
            # In multi-agent mode the caller must supply one action per agent as a list.
            if not isinstance(action, list):
                raise UnityGymException("The environment was expecting `action` to be a list.")
            if len(action) != self._n_agents:
                raise UnityGymException(
                    "The environment was expecting a list of {} actions.".format(self._n_agents))
            else:
                action = np.array(action)

        info = self._env.step(action)[self.brain_name]
        self._check_agents(len(info.agents))
        if not self._multiagent:
            return self._single_step(info)
        return self._multi_step(info)

    def _single_step(self, info):
        if self.use_visual:
            # Only the first visual observation of the single agent is exposed.
            self.visual_obs = info.visual_observations[0][0, :, :, :]
            default_observation = self.visual_obs
        else:
            default_observation = info.vector_observations[0, :]

        return default_observation, info.rewards[0], info.local_done[0], {
            "text_observation": info.text_observations[0],
            "brain_info": info}

    def _multi_step(self, info):
        if self.use_visual:
            self.visual_obs = info.visual_observations
            default_observation = self.visual_obs
        else:
            default_observation = info.vector_observations
        return list(default_observation), info.rewards, info.local_done, {
            "text_observation": info.text_observations,
            "brain_info": info}
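
    # Usage sketch for multi-agent mode (illustrative only; the build path is a
    # placeholder and `reset`/`action_space` belong to the full wrapper, not to
    # this excerpt):
    #
    #     env = UnityEnv("UnityBuild/MyEnv", multiagent=True)
    #     obs_list = env.reset()
    #     actions = [env.action_space.sample() for _ in range(len(obs_list))]
    #     obs_list, rewards, dones, info = env.step(actions)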

    def render(self, mode='rgb_array'):
        return self.visual_obs

    def _check_agents(self, n_agents):
        if not self._multiagent and n_agents > 1:
            raise UnityGymException(
                "The environment was launched as a single-agent environment, however "
                "there is more than one agent in the scene.")
        elif self._multiagent and n_agents <= 1:
            raise UnityGymException(
                "The environment was launched as a multi-agent environment, however "
                "there is only one agent in the scene.")
        if self._n_agents is None:
            self._n_agents = n_agents
            logger.info("{} agents within environment.".format(n_agents))
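

# A minimal end-to-end sketch, not part of the wrapper itself: it assumes a
# compiled Unity build at the placeholder path "UnityBuild/MyEnv" and relies on
# the standard gym members (`reset`, `close`, `action_space`) that the full
# wrapper defines alongside the methods above.
if __name__ == "__main__":
    env = UnityEnv("UnityBuild/MyEnv", worker_id=0, use_visual=False)
    observation = env.reset()
    for _ in range(100):
        # Sample a random action and advance the wrapped environment by one step.
        observation, reward, done, info = env.step(env.action_space.sample())
        if done:
            observation = env.reset()
    env.close()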