
Python code reformat via [`black`](https://github.com/ambv/black).

Features:
- Reformat code via black.
- Add CircleCI configuration.
- Add contribution guidelines.

Steps to reproduce:
- `pip install black`
- `black <source code directory>`
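
The resulting changes are purely mechanical. As one illustration (taken from the `gym-unity/gym_unity/envs/unity_env.py` diff further down), black explodes signatures that exceed its default 88-character line limit into one argument per line with a trailing comma, and normalizes single-quoted strings to double quotes:

```python
# Before reformatting: one long signature line
def __init__(self, environment_filename: str, worker_id=0, use_visual=False, uint8_visual=False, multiagent=False, flatten_branched=False):
    ...

# After black: one argument per line with a trailing comma
def __init__(
    self,
    environment_filename: str,
    worker_id=0,
    use_visual=False,
    uint8_visual=False,
    multiagent=False,
    flatten_branched=False,
):
    ...
```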
/develop-generalizationTraining-TrainerController
eshvk, 6 years ago
Current commit: ef8009d9
74 files changed, with 6,023 insertions and 3,767 deletions
1. .circleci/config.yml (9)
2. CONTRIBUTING.md (5)
3. gym-unity/gym_unity/envs/unity_env.py (131)
4. gym-unity/gym_unity/tests/test_gym.py (67)
5. gym-unity/setup.py (21)
6. ml-agents-envs/mlagents/envs/base_unity_environment.py (4)
7. ml-agents-envs/mlagents/envs/brain.py (141)
8. ml-agents-envs/mlagents/envs/communicator.py (1)
9. ml-agents-envs/mlagents/envs/communicator_objects/agent_action_proto_pb2.py (212)
10. ml-agents-envs/mlagents/envs/communicator_objects/agent_info_proto_pb2.py (387)
11. ml-agents-envs/mlagents/envs/communicator_objects/brain_parameters_proto_pb2.py (298)
12. ml-agents-envs/mlagents/envs/communicator_objects/command_proto_pb2.py (61)
13. ml-agents-envs/mlagents/envs/communicator_objects/custom_action_pb2.py (72)
14. ml-agents-envs/mlagents/envs/communicator_objects/custom_observation_pb2.py (72)
15. ml-agents-envs/mlagents/envs/communicator_objects/custom_reset_parameters_pb2.py (72)
16. ml-agents-envs/mlagents/envs/communicator_objects/demonstration_meta_proto_pb2.py (198)
17. ml-agents-envs/mlagents/envs/communicator_objects/engine_configuration_proto_pb2.py (223)
18. ml-agents-envs/mlagents/envs/communicator_objects/environment_parameters_proto_pb2.py (250)
19. ml-agents-envs/mlagents/envs/communicator_objects/header_pb2.py (123)
20. ml-agents-envs/mlagents/envs/communicator_objects/resolution_proto_pb2.py (148)
21. ml-agents-envs/mlagents/envs/communicator_objects/space_type_proto_pb2.py (62)
22. ml-agents-envs/mlagents/envs/communicator_objects/unity_input_pb2.py (148)
23. ml-agents-envs/mlagents/envs/communicator_objects/unity_message_pb2.py (182)
24. ml-agents-envs/mlagents/envs/communicator_objects/unity_output_pb2.py (148)
25. ml-agents-envs/mlagents/envs/communicator_objects/unity_rl_initialization_input_pb2.py (100)
26. ml-agents-envs/mlagents/envs/communicator_objects/unity_rl_initialization_output_pb2.py (225)
27. ml-agents-envs/mlagents/envs/communicator_objects/unity_rl_input_pb2.py (398)
28. ml-agents-envs/mlagents/envs/communicator_objects/unity_rl_output_pb2.py (326)
29. ml-agents-envs/mlagents/envs/communicator_objects/unity_to_external_pb2.py (62)
30. ml-agents-envs/mlagents/envs/communicator_objects/unity_to_external_pb2_grpc.py (55)
31. ml-agents-envs/mlagents/envs/environment.py (358)
32. ml-agents-envs/mlagents/envs/exception.py (39)
33. ml-agents-envs/mlagents/envs/mock_communicator.py (63)
34. ml-agents-envs/mlagents/envs/rpc_communicator.py (10)
35. ml-agents-envs/mlagents/envs/socket_communicator.py (17)
36. ml-agents-envs/mlagents/envs/subprocess_environment.py (88)
37. ml-agents-envs/mlagents/envs/tests/test_envs.py (124)
38. ml-agents-envs/mlagents/envs/tests/test_rpc_communicator.py (1)
39. ml-agents-envs/mlagents/envs/tests/test_subprocess_unity_environment.py (60)
40. ml-agents-envs/setup.py (38)
41. ml-agents/mlagents/trainers/__init__.py (2)
42. ml-agents/mlagents/trainers/barracuda.py (335)
43. ml-agents/mlagents/trainers/bc/models.py (100)
44. ml-agents/mlagents/trainers/bc/offline_trainer.py (58)
45. ml-agents/mlagents/trainers/bc/online_trainer.py (111)
46. ml-agents/mlagents/trainers/bc/policy.py (61)
47. ml-agents/mlagents/trainers/bc/trainer.py (53)
48. ml-agents/mlagents/trainers/buffer.py (73)
49. ml-agents/mlagents/trainers/curriculum.py (67)
50. ml-agents/mlagents/trainers/demo_loader.py (44)
51. ml-agents/mlagents/trainers/exception.py (5)
52. ml-agents/mlagents/trainers/learn.py (210)
53. ml-agents/mlagents/trainers/meta_curriculum.py (56)
54. ml-agents/mlagents/trainers/models.py (464)
55. ml-agents/mlagents/trainers/policy.py (83)
56. ml-agents/mlagents/trainers/ppo/models.py (205)
57. ml-agents/mlagents/trainers/ppo/policy.py (179)
58. ml-agents/mlagents/trainers/ppo/trainer.py (321)
59. ml-agents/mlagents/trainers/tensorflow_to_barracuda.py (877)
60. ml-agents/mlagents/trainers/tests/test_barracuda_converter.py (12)
61. ml-agents/mlagents/trainers/tests/test_bc.py (110)
62. ml-agents/mlagents/trainers/tests/test_buffer.py (62)
63. ml-agents/mlagents/trainers/tests/test_curriculum.py (31)
64. ml-agents/mlagents/trainers/tests/test_demo_loader.py (12)
65. ml-agents/mlagents/trainers/tests/test_learn.py (60)
66. ml-agents/mlagents/trainers/tests/test_meta_curriculum.py (98)
67. ml-agents/mlagents/trainers/tests/test_policy.py (31)
68. ml-agents/mlagents/trainers/tests/test_ppo.py (391)
69. ml-agents/mlagents/trainers/tests/test_trainer_controller.py (206)
70. ml-agents/mlagents/trainers/tests/test_trainer_metrics.py (47)
71. ml-agents/mlagents/trainers/trainer.py (117)
72. ml-agents/mlagents/trainers/trainer_controller.py (195)
73. ml-agents/mlagents/trainers/trainer_metrics.py (53)
74. ml-agents/setup.py (62)

.circleci/config.yml (9)


version: 2.1
version: 2.0
jobs:
build:

. venv/bin/activate
cd ml-agents-envs && pip install -e .
cd ../ml-agents && pip install -e .
pip install pytest-cov==2.6.1 codacy-coverage==1.3.11
pip install black pytest-cov==2.6.1 codacy-coverage==1.3.11
cd ../gym-unity && pip install -e .
- save_cache:

- run:
name: Run Tests for ml-agents and gym_unity
command: |

python-codacy-coverage -r coverage.xml
black --check ml-agents
black --check ml-agents-envs
black --check gym-unity
- store_test_results:
path: test-reports
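
The `black --check` invocations above are what let the job fail on unformatted code: with `--check`, black does not rewrite anything; it only reports the files it would reformat and exits non-zero if there are any. A minimal sketch of that behavior (not part of this commit; package paths copied from the config above):

```python
import subprocess
import sys

# `black --check <path>` exits 0 when every file is already formatted,
# and 1 when at least one file would be reformatted.
for package in ("ml-agents", "ml-agents-envs", "gym-unity"):
    result = subprocess.run(["black", "--check", package])
    if result.returncode != 0:
        sys.exit(f"black would reformat files under {package}")
```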

CONTRIBUTING.md (5)


## Style Guide
When performing changes to the codebase, ensure that you follow the style guide
of the file you're modifying. For Python, we follow
[PEP 8](https://www.python.org/dev/peps/pep-0008/).
For C#, we will soon be adding a formal style guide for our repository.
When performing changes to the codebase, please ensure that all Python code is reformatted using the [black](https://github.com/ambv/black) formatter. For C#, we will soon be adding requirements for style and formatting.

gym-unity/gym_unity/envs/unity_env.py (131)


"""
Any error related to the gym wrapper of ml-agents.
"""
pass

https://github.com/openai/multiagent-particle-envs
"""
def __init__(self, environment_filename: str, worker_id=0, use_visual=False, uint8_visual=False, multiagent=False, flatten_branched=False):
def __init__(
self,
environment_filename: str,
worker_id=0,
use_visual=False,
uint8_visual=False,
multiagent=False,
flatten_branched=False,
):
"""
Environment initialization
:param environment_filename: The UnityEnvironment path or file to be wrapped in the gym.

self._n_agents = None
self._multiagent = multiagent
self._flattener = None
self.game_over = False # Hidden flag used by Atari environments to determine if the game is over
self.game_over = (
False
) # Hidden flag used by Atari environments to determine if the game is over
"if it is wrapped in a gym.")
"if it is wrapped in a gym."
)
raise UnityGymException("`use_visual` was set to True, however there are no"
" visual observations as part of this environment.")
raise UnityGymException(
"`use_visual` was set to True, however there are no"
" visual observations as part of this environment."
)
logger.warning("`uint8_visual was set to true, but visual observations are not in use. "
"This setting will not have any effect.")
logger.warning(
"`uint8_visual was set to true, but visual observations are not in use. "
"This setting will not have any effect."
)
logger.warning("The environment contains more than one visual observation. "
"Please note that only the first will be provided in the observation.")
logger.warning(
"The environment contains more than one visual observation. "
"Please note that only the first will be provided in the observation."
)
"if it is wrapped in a gym.")
"if it is wrapped in a gym."
)
# Check for number of agents in scene.
initial_info = self._env.reset()[self.brain_name]

self._flattener = ActionFlattener(brain.vector_action_space_size)
self._action_space = self._flattener.action_space
else:
self._action_space = spaces.MultiDiscrete(brain.vector_action_space_size)
self._action_space = spaces.MultiDiscrete(
brain.vector_action_space_size
)
logger.warning("The environment has a non-discrete action space. It will "
"not be flattened.")
logger.warning(
"The environment has a non-discrete action space. It will "
"not be flattened."
)
high = np.array([1] * brain.vector_action_space_size[0])
self._action_space = spaces.Box(-high, high, dtype=np.float32)
high = np.array([np.inf] * brain.vector_observation_space_size)

depth = 1
else:
depth = 3
self._observation_space = spaces.Box(0, 1, dtype=np.float32,
shape=(brain.camera_resolutions[0]["height"],
brain.camera_resolutions[0]["width"],
depth))
self._observation_space = spaces.Box(
0,
1,
dtype=np.float32,
shape=(
brain.camera_resolutions[0]["height"],
brain.camera_resolutions[0]["width"],
depth,
),
)
else:
self._observation_space = spaces.Box(-high, high, dtype=np.float32)

# Use random actions for all other agents in environment.
if self._multiagent:
if not isinstance(action, list):
raise UnityGymException("The environment was expecting `action` to be a list.")
raise UnityGymException(
"The environment was expecting `action` to be a list."
)
"The environment was expecting a list of {} actions.".format(self._n_agents))
"The environment was expecting a list of {} actions.".format(
self._n_agents
)
)
else:
if self._flattener is not None:
# Action space is discrete and flattened - we expect a list of scalars

def _single_step(self, info):
if self.use_visual:
self.visual_obs = self._preprocess_single(info.visual_observations[0][0, :, :, :])
self.visual_obs = self._preprocess_single(
info.visual_observations[0][0, :, :, :]
)
return default_observation, info.rewards[0], info.local_done[0], {
"text_observation": info.text_observations[0],
"brain_info": info}
return (
default_observation,
info.rewards[0],
info.local_done[0],
{"text_observation": info.text_observations[0], "brain_info": info},
)
return (255.0*single_visual_obs).astype(np.uint8)
return (255.0 * single_visual_obs).astype(np.uint8)
else:
return single_visual_obs

default_observation = self.visual_obs
else:
default_observation = info.vector_observations
return list(default_observation), info.rewards, info.local_done, {
"text_observation": info.text_observations,
"brain_info": info}
return (
list(default_observation),
info.rewards,
info.local_done,
{"text_observation": info.text_observations, "brain_info": info},
)
return [(255.0*_visual_obs).astype(np.uint8) for _visual_obs in multiple_visual_obs]
return [
(255.0 * _visual_obs).astype(np.uint8)
for _visual_obs in multiple_visual_obs
]
def render(self, mode='rgb_array'):
def render(self, mode="rgb_array"):
return self.visual_obs
def close(self):

if not self._multiagent and n_agents > 1:
raise UnityGymException(
"The environment was launched as a single-agent environment, however"
"there is more than one agent in the scene.")
"there is more than one agent in the scene."
)
"there is only one agent in the scene.")
"there is only one agent in the scene."
)
raise UnityGymException("The number of agents in the environment has changed since "
"initialization. This is not supported.")
raise UnityGymException(
"The number of agents in the environment has changed since "
"initialization. This is not supported."
)
return {'render.modes': ['rgb_array']}
return {"render.modes": ["rgb_array"]}
return -float('inf'), float('inf')
return -float("inf"), float("inf")
@property
def spec(self):

def number_agents(self):
return self._n_agents
class ActionFlattener():
class ActionFlattener:
def __init__(self,branched_action_space):
def __init__(self, branched_action_space):
"""
Initialize the flattener.
:param branched_action_space: A List containing the sizes of each branch of the action

possible_vals = [range(_num) for _num in branched_action_space]
all_actions = [list(_action) for _action in itertools.product(*possible_vals)]
# Dict should be faster than List for large action spaces
action_lookup = {_scalar: _action for (_scalar, _action) in enumerate(all_actions)}
action_lookup = {
_scalar: _action for (_scalar, _action) in enumerate(all_actions)
}
return action_lookup
def lookup_action(self, action):
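
The `ActionFlattener` shown here converts a branched discrete action space (for example, branch sizes `[2, 2, 3]`) into a single flat discrete space by enumerating every combination of branch values with `itertools.product`; `lookup_action` then maps a flat scalar back to its per-branch action list. A condensed, self-contained sketch of that idea (the real class additionally builds a `gym.spaces.Discrete` action space, omitted here):

```python
import itertools


class ActionFlattener:
    """Maps a flat scalar action onto a branched discrete action space."""

    def __init__(self, branched_action_space):
        # e.g. [2, 2, 3] -> 2 * 2 * 3 = 12 flattened actions
        possible_vals = [range(_num) for _num in branched_action_space]
        all_actions = [list(a) for a in itertools.product(*possible_vals)]
        # Dict should be faster than List for large action spaces
        self.action_lookup = {i: action for i, action in enumerate(all_actions)}

    def lookup_action(self, action):
        return self.action_lookup[action]


flattener = ActionFlattener([2, 2, 3])
assert len(flattener.action_lookup) == 12
assert flattener.lookup_action(0) == [0, 0, 0]
assert flattener.lookup_action(11) == [1, 1, 2]
```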

gym-unity/gym_unity/tests/test_gym.py (67)


from gym_unity.envs import UnityEnv, UnityGymException
@mock.patch('gym_unity.envs.unity_env.UnityEnvironment')
@mock.patch("gym_unity.envs.unity_env.UnityEnvironment")
env = UnityEnv(' ', use_visual=False, multiagent=False)
env = UnityEnv(" ", use_visual=False, multiagent=False)
assert isinstance(env, UnityEnv)
assert isinstance(env.reset(), np.ndarray)
actions = env.action_space.sample()

assert isinstance(info, dict)
@mock.patch('gym_unity.envs.unity_env.UnityEnvironment')
@mock.patch("gym_unity.envs.unity_env.UnityEnvironment")
def test_multi_agent(mock_env):
mock_brain = create_mock_brainparams()
mock_braininfo = create_mock_vector_braininfo(num_agents=2)

UnityEnv(' ', multiagent=False)
UnityEnv(" ", multiagent=False)
env = UnityEnv(' ', use_visual=False, multiagent=True)
env = UnityEnv(" ", use_visual=False, multiagent=True)
assert isinstance(env.reset(), list)
actions = [env.action_space.sample() for i in range(env.number_agents)]
obs, rew, done, info = env.step(actions)

assert isinstance(info, dict)
@mock.patch('gym_unity.envs.unity_env.UnityEnvironment')
@mock.patch("gym_unity.envs.unity_env.UnityEnvironment")
mock_brain = create_mock_brainparams(vector_action_space_type='discrete', vector_action_space_size=[2,2,3])
mock_brain = create_mock_brainparams(
vector_action_space_type="discrete", vector_action_space_size=[2, 2, 3]
)
env = UnityEnv(' ', use_visual=False, multiagent=False, flatten_branched=True)
env = UnityEnv(" ", use_visual=False, multiagent=False, flatten_branched=True)
assert env.action_space.n==12
assert env._flattener.lookup_action(0)==[0,0,0]
assert env._flattener.lookup_action(11)==[1,1,2]
assert env.action_space.n == 12
assert env._flattener.lookup_action(0) == [0, 0, 0]
assert env._flattener.lookup_action(11) == [1, 1, 2]
env = UnityEnv(' ', use_visual=False, multiagent=False, flatten_branched=False)
env = UnityEnv(" ", use_visual=False, multiagent=False, flatten_branched=False)
def create_mock_brainparams(number_visual_observations=0, num_stacked_vector_observations=1,
vector_action_space_type='continuous', vector_observation_space_size=3,
vector_action_space_size=None):
def create_mock_brainparams(
number_visual_observations=0,
num_stacked_vector_observations=1,
vector_action_space_type="continuous",
vector_observation_space_size=3,
vector_action_space_size=None,
):
"""
Creates a mock BrainParameters object with parameters.
"""

mock_brain = mock.Mock()
mock_brain.return_value.number_visual_observations = number_visual_observations
mock_brain.return_value.num_stacked_vector_observations = num_stacked_vector_observations
mock_brain.return_value.num_stacked_vector_observations = (
num_stacked_vector_observations
)
mock_brain.return_value.vector_observation_space_size = vector_observation_space_size
mock_brain.return_value.vector_observation_space_size = (
vector_observation_space_size
)
def create_mock_vector_braininfo(num_agents = 1):
def create_mock_vector_braininfo(num_agents=1):
"""
Creates a mock BrainInfo with vector observations. Imitates constant
vector observations, rewards, dones, and agents.

mock_braininfo = mock.Mock()
mock_braininfo.return_value.vector_observations = np.array([num_agents*[1, 2, 3,]])
mock_braininfo.return_value.rewards = num_agents*[1.0]
mock_braininfo.return_value.local_done = num_agents*[False]
mock_braininfo.return_value.text_observations = num_agents*['']
mock_braininfo.return_value.agents = range(0,num_agents)
mock_braininfo.return_value.vector_observations = np.array([num_agents * [1, 2, 3]])
mock_braininfo.return_value.rewards = num_agents * [1.0]
mock_braininfo.return_value.local_done = num_agents * [False]
mock_braininfo.return_value.text_observations = num_agents * [""]
mock_braininfo.return_value.agents = range(0, num_agents)
return mock_braininfo()

:Mock mock_brain: A mock Brain object that specifies the params of this environment.
:Mock mock_braininfo: A mock BrainInfo object that will be returned at each step and reset.
"""
mock_env.return_value.academy_name = 'MockAcademy'
mock_env.return_value.brains = {'MockBrain':mock_brain}
mock_env.return_value.external_brain_names = ['MockBrain']
mock_env.return_value.reset.return_value = {'MockBrain':mock_braininfo}
mock_env.return_value.step.return_value = {'MockBrain':mock_braininfo}
mock_env.return_value.academy_name = "MockAcademy"
mock_env.return_value.brains = {"MockBrain": mock_brain}
mock_env.return_value.external_brain_names = ["MockBrain"]
mock_env.return_value.reset.return_value = {"MockBrain": mock_braininfo}
mock_env.return_value.step.return_value = {"MockBrain": mock_braininfo}

gym-unity/setup.py (21)


from setuptools import setup, find_packages
setup(name='gym_unity',
version='0.4.1',
description='Unity Machine Learning Agents Gym Interface',
license='Apache License 2.0',
author='Unity Technologies',
author_email='ML-Agents@unity3d.com',
url='https://github.com/Unity-Technologies/ml-agents',
packages=find_packages(),
install_requires=['gym', 'mlagents_envs==0.8.1']
)
setup(
name="gym_unity",
version="0.4.1",
description="Unity Machine Learning Agents Gym Interface",
license="Apache License 2.0",
author="Unity Technologies",
author_email="ML-Agents@unity3d.com",
url="https://github.com/Unity-Technologies/ml-agents",
packages=find_packages(),
install_requires=["gym", "mlagents_envs==0.8.1"],
)

ml-agents-envs/mlagents/envs/base_unity_environment.py (4)


class BaseUnityEnvironment(ABC):
@abstractmethod
def step(self, vector_action=None, memory=None, text_action=None, value=None) -> AllBrainInfo:
def step(
self, vector_action=None, memory=None, text_action=None, value=None
) -> AllBrainInfo:
pass
@abstractmethod

ml-agents-envs/mlagents/envs/brain.py (141)


class BrainInfo:
def __init__(self, visual_observation, vector_observation, text_observations, memory=None,
reward=None, agents=None, local_done=None,
vector_action=None, text_action=None, max_reached=None, action_mask=None,
custom_observations=None):
def __init__(
self,
visual_observation,
vector_observation,
text_observations,
memory=None,
reward=None,
agents=None,
local_done=None,
vector_action=None,
text_action=None,
max_reached=None,
action_mask=None,
custom_observations=None,
):
"""
Describes experience at current step of all agents linked to a brain.
"""

def merge(self, other):
for i in range(len(self.visual_observations)):
self.visual_observations[i].extend(other.visual_observations[i])
self.vector_observations = np.append(self.vector_observations, other.vector_observations, axis=0)
self.vector_observations = np.append(
self.vector_observations, other.vector_observations, axis=0
)
self.memories = self.merge_memories(self.memories, other.memories, self.agents, other.agents)
self.memories = self.merge_memories(
self.memories, other.memories, self.agents, other.agents
)
self.rewards = safe_concat_lists(self.rewards, other.rewards)
self.local_done = safe_concat_lists(self.local_done, other.local_done)
self.max_reached = safe_concat_lists(self.max_reached, other.max_reached)

self.previous_text_actions = safe_concat_lists(
self.previous_text_actions, other.previous_text_actions
)
self.action_masks = safe_concat_np_ndarray(self.action_masks, other.action_masks)
self.custom_observations = safe_concat_lists(self.custom_observations, other.custom_observations)
self.action_masks = safe_concat_np_ndarray(
self.action_masks, other.action_masks
)
self.custom_observations = safe_concat_lists(
self.custom_observations, other.custom_observations
)
@staticmethod
def merge_memories(m1, m2, agents1, agents2):

m2 = np.zeros((len(agents2), m1.shape[1]))
elif m2.shape[1] > m1.shape[1]:
new_m1 = np.zeros((m1.shape[0], m2.shape[1]))
new_m1[0:m1.shape[0], 0:m1.shape[1]] = m1
new_m1[0 : m1.shape[0], 0 : m1.shape[1]] = m1
new_m2[0:m2.shape[0], 0:m2.shape[1]] = m2
new_m2[0 : m2.shape[0], 0 : m2.shape[1]] = m2
return np.append(m1, new_m2, axis=0)
return np.append(m1, m2, axis=0)
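
The `merge_memories` helper above zero-pads whichever memory matrix has fewer columns so that the agent memories of two `BrainInfo` objects can be stacked row-wise. A standalone, simplified sketch of that padding step (not the exact ml-agents method; the shapes in the usage line are made up):

```python
import numpy as np


def merge_memories(m1, m2, agents1, agents2):
    # An empty memory block becomes a zero matrix sized to match the other side.
    if len(m1) == 0 and len(m2) != 0:
        m1 = np.zeros((len(agents1), m2.shape[1]))
    elif len(m2) == 0 and len(m1) != 0:
        m2 = np.zeros((len(agents2), m1.shape[1]))
    # Pad the narrower matrix with zero columns, then stack the rows.
    if m2.shape[1] > m1.shape[1]:
        new_m1 = np.zeros((m1.shape[0], m2.shape[1]))
        new_m1[:, : m1.shape[1]] = m1
        return np.append(new_m1, m2, axis=0)
    if m1.shape[1] > m2.shape[1]:
        new_m2 = np.zeros((m2.shape[0], m1.shape[1]))
        new_m2[:, : m2.shape[1]] = m2
        return np.append(m1, new_m2, axis=0)
    return np.append(m1, m2, axis=0)


merged = merge_memories(np.ones((2, 2)), np.ones((3, 4)), [0, 1], [2, 3, 4])
print(merged.shape)  # (5, 4)
```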

"""
vis_obs = []
for i in range(brain_params.number_visual_observations):
obs = [BrainInfo.process_pixels(x.visual_observations[i],
brain_params.camera_resolutions[i]['blackAndWhite'])
for x in agent_info_list]
obs = [
BrainInfo.process_pixels(
x.visual_observations[i],
brain_params.camera_resolutions[i]["blackAndWhite"],
)
for x in agent_info_list
]
vis_obs += [obs]
if len(agent_info_list) == 0:
memory_size = 0

memory = np.zeros((0, 0))
else:
[x.memories.extend([0] * (memory_size - len(x.memories))) for x in agent_info_list]
[
x.memories.extend([0] * (memory_size - len(x.memories)))
for x in agent_info_list
]
memory = np.array([list(x.memories) for x in agent_info_list])
total_num_actions = sum(brain_params.vector_action_space_size)
mask_actions = np.ones((len(agent_info_list), total_num_actions))

mask_actions[agent_index, :] = [
0 if agent_info.action_mask[k] else 1 for k in range(total_num_actions)]
0 if agent_info.action_mask[k] else 1
for k in range(total_num_actions)
]
logger.warning("An agent had a NaN reward for brain " + brain_params.brain_name)
logger.warning(
"An agent had a NaN reward for brain " + brain_params.brain_name
)
logger.warning("An agent had a NaN observation for brain " + brain_params.brain_name)
logger.warning(
"An agent had a NaN observation for brain " + brain_params.brain_name
)
(0, brain_params.vector_observation_space_size * brain_params.num_stacked_vector_observations)
(
0,
brain_params.vector_observation_space_size
* brain_params.num_stacked_vector_observations,
)
)
else:
vector_obs = np.nan_to_num(

text_action=[list(x.stored_text_actions) for x in agent_info_list],
max_reached=[x.max_step_reached for x in agent_info_list],
custom_observations=[x.custom_observation for x in agent_info_list],
action_mask=mask_actions
action_mask=mask_actions,
)
return brain_info

class BrainParameters:
def __init__(self,
brain_name: str,
vector_observation_space_size: int,
num_stacked_vector_observations: int,
camera_resolutions: List[Dict],
vector_action_space_size: List[int],
vector_action_descriptions: List[str],
vector_action_space_type: int):
def __init__(
self,
brain_name: str,
vector_observation_space_size: int,
num_stacked_vector_observations: int,
camera_resolutions: List[Dict],
vector_action_space_size: List[int],
vector_action_descriptions: List[str],
vector_action_space_type: int,
):
"""
Contains all brain-specific parameters.
"""

self.camera_resolutions = camera_resolutions
self.vector_action_space_size = vector_action_space_size
self.vector_action_descriptions = vector_action_descriptions
self.vector_action_space_type = ["discrete", "continuous"][vector_action_space_type]
self.vector_action_space_type = ["discrete", "continuous"][
vector_action_space_type
]
return '''Unity brain name: {}
return """Unity brain name: {}
Vector Action descriptions: {}'''.format(self.brain_name,
str(self.number_visual_observations),
str(self.vector_observation_space_size),
str(self.num_stacked_vector_observations),
self.vector_action_space_type,
str(self.vector_action_space_size),
', '.join(self.vector_action_descriptions))
Vector Action descriptions: {}""".format(
self.brain_name,
str(self.number_visual_observations),
str(self.vector_observation_space_size),
str(self.num_stacked_vector_observations),
self.vector_action_space_type,
str(self.vector_action_space_size),
", ".join(self.vector_action_descriptions),
)
@staticmethod
def from_proto(brain_param_proto):

:return: BrainParameter object.
"""
resolution = [{
"height": x.height,
"width": x.width,
"blackAndWhite": x.gray_scale
} for x in brain_param_proto.camera_resolutions]
brain_params = BrainParameters(brain_param_proto.brain_name,
brain_param_proto.vector_observation_size,
brain_param_proto.num_stacked_vector_observations,
resolution,
list(brain_param_proto.vector_action_size),
list(brain_param_proto.vector_action_descriptions),
brain_param_proto.vector_action_space_type)
return brain_params
resolution = [
{"height": x.height, "width": x.width, "blackAndWhite": x.gray_scale}
for x in brain_param_proto.camera_resolutions
]
brain_params = BrainParameters(
brain_param_proto.brain_name,
brain_param_proto.vector_observation_size,
brain_param_proto.num_stacked_vector_observations,
resolution,
list(brain_param_proto.vector_action_size),
list(brain_param_proto.vector_action_descriptions),
brain_param_proto.vector_action_space_type,
)
return brain_params

ml-agents-envs/mlagents/envs/communicator.py (1)


"""
Sends a shutdown signal to the unity environment, and closes the connection.
"""

ml-agents-envs/mlagents/envs/communicator_objects/agent_action_proto_pb2.py (212)


# source: mlagents/envs/communicator_objects/agent_action_proto.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from mlagents.envs.communicator_objects import custom_action_pb2 as mlagents_dot_envs_dot_communicator__objects_dot_custom__action__pb2
from mlagents.envs.communicator_objects import (
custom_action_pb2 as mlagents_dot_envs_dot_communicator__objects_dot_custom__action__pb2,
)
name='mlagents/envs/communicator_objects/agent_action_proto.proto',
package='communicator_objects',
syntax='proto3',
serialized_options=_b('\252\002\034MLAgents.CommunicatorObjects'),
serialized_pb=_b('\n;mlagents/envs/communicator_objects/agent_action_proto.proto\x12\x14\x63ommunicator_objects\x1a\x36mlagents/envs/communicator_objects/custom_action.proto\"\x9c\x01\n\x10\x41gentActionProto\x12\x16\n\x0evector_actions\x18\x01 \x03(\x02\x12\x14\n\x0ctext_actions\x18\x02 \x01(\t\x12\x10\n\x08memories\x18\x03 \x03(\x02\x12\r\n\x05value\x18\x04 \x01(\x02\x12\x39\n\rcustom_action\x18\x05 \x01(\x0b\x32\".communicator_objects.CustomActionB\x1f\xaa\x02\x1cMLAgents.CommunicatorObjectsb\x06proto3')
,
dependencies=[mlagents_dot_envs_dot_communicator__objects_dot_custom__action__pb2.DESCRIPTOR,])
name="mlagents/envs/communicator_objects/agent_action_proto.proto",
package="communicator_objects",
syntax="proto3",
serialized_options=_b("\252\002\034MLAgents.CommunicatorObjects"),
serialized_pb=_b(
'\n;mlagents/envs/communicator_objects/agent_action_proto.proto\x12\x14\x63ommunicator_objects\x1a\x36mlagents/envs/communicator_objects/custom_action.proto"\x9c\x01\n\x10\x41gentActionProto\x12\x16\n\x0evector_actions\x18\x01 \x03(\x02\x12\x14\n\x0ctext_actions\x18\x02 \x01(\t\x12\x10\n\x08memories\x18\x03 \x03(\x02\x12\r\n\x05value\x18\x04 \x01(\x02\x12\x39\n\rcustom_action\x18\x05 \x01(\x0b\x32".communicator_objects.CustomActionB\x1f\xaa\x02\x1cMLAgents.CommunicatorObjectsb\x06proto3'
),
dependencies=[
mlagents_dot_envs_dot_communicator__objects_dot_custom__action__pb2.DESCRIPTOR
],
)
name='AgentActionProto',
full_name='communicator_objects.AgentActionProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='vector_actions', full_name='communicator_objects.AgentActionProto.vector_actions', index=0,
number=1, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='text_actions', full_name='communicator_objects.AgentActionProto.text_actions', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='memories', full_name='communicator_objects.AgentActionProto.memories', index=2,
number=3, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='communicator_objects.AgentActionProto.value', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='custom_action', full_name='communicator_objects.AgentActionProto.custom_action', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=142,
serialized_end=298,
name="AgentActionProto",
full_name="communicator_objects.AgentActionProto",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="vector_actions",
full_name="communicator_objects.AgentActionProto.vector_actions",
index=0,
number=1,
type=2,
cpp_type=6,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="text_actions",
full_name="communicator_objects.AgentActionProto.text_actions",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="memories",
full_name="communicator_objects.AgentActionProto.memories",
index=2,
number=3,
type=2,
cpp_type=6,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="value",
full_name="communicator_objects.AgentActionProto.value",
index=3,
number=4,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="custom_action",
full_name="communicator_objects.AgentActionProto.custom_action",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=142,
serialized_end=298,
_AGENTACTIONPROTO.fields_by_name['custom_action'].message_type = mlagents_dot_envs_dot_communicator__objects_dot_custom__action__pb2._CUSTOMACTION
DESCRIPTOR.message_types_by_name['AgentActionProto'] = _AGENTACTIONPROTO
_AGENTACTIONPROTO.fields_by_name[
"custom_action"
].message_type = (
mlagents_dot_envs_dot_communicator__objects_dot_custom__action__pb2._CUSTOMACTION
)
DESCRIPTOR.message_types_by_name["AgentActionProto"] = _AGENTACTIONPROTO
AgentActionProto = _reflection.GeneratedProtocolMessageType('AgentActionProto', (_message.Message,), dict(
DESCRIPTOR = _AGENTACTIONPROTO,
__module__ = 'mlagents.envs.communicator_objects.agent_action_proto_pb2'
# @@protoc_insertion_point(class_scope:communicator_objects.AgentActionProto)
))
AgentActionProto = _reflection.GeneratedProtocolMessageType(
"AgentActionProto",
(_message.Message,),
dict(
DESCRIPTOR=_AGENTACTIONPROTO,
__module__="mlagents.envs.communicator_objects.agent_action_proto_pb2"
# @@protoc_insertion_point(class_scope:communicator_objects.AgentActionProto)
),
)
_sym_db.RegisterMessage(AgentActionProto)

ml-agents-envs/mlagents/envs/communicator_objects/agent_info_proto_pb2.py (387)


# source: mlagents/envs/communicator_objects/agent_info_proto.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from mlagents.envs.communicator_objects import custom_observation_pb2 as mlagents_dot_envs_dot_communicator__objects_dot_custom__observation__pb2
from mlagents.envs.communicator_objects import (
custom_observation_pb2 as mlagents_dot_envs_dot_communicator__objects_dot_custom__observation__pb2,
)
name='mlagents/envs/communicator_objects/agent_info_proto.proto',
package='communicator_objects',
syntax='proto3',
serialized_options=_b('\252\002\034MLAgents.CommunicatorObjects'),
serialized_pb=_b('\n9mlagents/envs/communicator_objects/agent_info_proto.proto\x12\x14\x63ommunicator_objects\x1a;mlagents/envs/communicator_objects/custom_observation.proto\"\xd7\x02\n\x0e\x41gentInfoProto\x12\"\n\x1astacked_vector_observation\x18\x01 \x03(\x02\x12\x1b\n\x13visual_observations\x18\x02 \x03(\x0c\x12\x18\n\x10text_observation\x18\x03 \x01(\t\x12\x1d\n\x15stored_vector_actions\x18\x04 \x03(\x02\x12\x1b\n\x13stored_text_actions\x18\x05 \x01(\t\x12\x10\n\x08memories\x18\x06 \x03(\x02\x12\x0e\n\x06reward\x18\x07 \x01(\x02\x12\x0c\n\x04\x64one\x18\x08 \x01(\x08\x12\x18\n\x10max_step_reached\x18\t \x01(\x08\x12\n\n\x02id\x18\n \x01(\x05\x12\x13\n\x0b\x61\x63tion_mask\x18\x0b \x03(\x08\x12\x43\n\x12\x63ustom_observation\x18\x0c \x01(\x0b\x32\'.communicator_objects.CustomObservationB\x1f\xaa\x02\x1cMLAgents.CommunicatorObjectsb\x06proto3')
,
dependencies=[mlagents_dot_envs_dot_communicator__objects_dot_custom__observation__pb2.DESCRIPTOR,])
name="mlagents/envs/communicator_objects/agent_info_proto.proto",
package="communicator_objects",
syntax="proto3",
serialized_options=_b("\252\002\034MLAgents.CommunicatorObjects"),
serialized_pb=_b(
'\n9mlagents/envs/communicator_objects/agent_info_proto.proto\x12\x14\x63ommunicator_objects\x1a;mlagents/envs/communicator_objects/custom_observation.proto"\xd7\x02\n\x0e\x41gentInfoProto\x12"\n\x1astacked_vector_observation\x18\x01 \x03(\x02\x12\x1b\n\x13visual_observations\x18\x02 \x03(\x0c\x12\x18\n\x10text_observation\x18\x03 \x01(\t\x12\x1d\n\x15stored_vector_actions\x18\x04 \x03(\x02\x12\x1b\n\x13stored_text_actions\x18\x05 \x01(\t\x12\x10\n\x08memories\x18\x06 \x03(\x02\x12\x0e\n\x06reward\x18\x07 \x01(\x02\x12\x0c\n\x04\x64one\x18\x08 \x01(\x08\x12\x18\n\x10max_step_reached\x18\t \x01(\x08\x12\n\n\x02id\x18\n \x01(\x05\x12\x13\n\x0b\x61\x63tion_mask\x18\x0b \x03(\x08\x12\x43\n\x12\x63ustom_observation\x18\x0c \x01(\x0b\x32\'.communicator_objects.CustomObservationB\x1f\xaa\x02\x1cMLAgents.CommunicatorObjectsb\x06proto3'
),
dependencies=[
mlagents_dot_envs_dot_communicator__objects_dot_custom__observation__pb2.DESCRIPTOR
],
)
name='AgentInfoProto',
full_name='communicator_objects.AgentInfoProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='stacked_vector_observation', full_name='communicator_objects.AgentInfoProto.stacked_vector_observation', index=0,
number=1, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='visual_observations', full_name='communicator_objects.AgentInfoProto.visual_observations', index=1,
number=2, type=12, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='text_observation', full_name='communicator_objects.AgentInfoProto.text_observation', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stored_vector_actions', full_name='communicator_objects.AgentInfoProto.stored_vector_actions', index=3,
number=4, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stored_text_actions', full_name='communicator_objects.AgentInfoProto.stored_text_actions', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='memories', full_name='communicator_objects.AgentInfoProto.memories', index=5,
number=6, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reward', full_name='communicator_objects.AgentInfoProto.reward', index=6,
number=7, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='done', full_name='communicator_objects.AgentInfoProto.done', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_step_reached', full_name='communicator_objects.AgentInfoProto.max_step_reached', index=8,
number=9, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='communicator_objects.AgentInfoProto.id', index=9,
number=10, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='action_mask', full_name='communicator_objects.AgentInfoProto.action_mask', index=10,
number=11, type=8, cpp_type=7, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='custom_observation', full_name='communicator_objects.AgentInfoProto.custom_observation', index=11,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=145,
serialized_end=488,
name="AgentInfoProto",
full_name="communicator_objects.AgentInfoProto",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="stacked_vector_observation",
full_name="communicator_objects.AgentInfoProto.stacked_vector_observation",
index=0,
number=1,
type=2,
cpp_type=6,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="visual_observations",
full_name="communicator_objects.AgentInfoProto.visual_observations",
index=1,
number=2,
type=12,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="text_observation",
full_name="communicator_objects.AgentInfoProto.text_observation",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="stored_vector_actions",
full_name="communicator_objects.AgentInfoProto.stored_vector_actions",
index=3,
number=4,
type=2,
cpp_type=6,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="stored_text_actions",
full_name="communicator_objects.AgentInfoProto.stored_text_actions",
index=4,
number=5,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="memories",
full_name="communicator_objects.AgentInfoProto.memories",
index=5,
number=6,
type=2,
cpp_type=6,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="reward",
full_name="communicator_objects.AgentInfoProto.reward",
index=6,
number=7,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="done",
full_name="communicator_objects.AgentInfoProto.done",
index=7,
number=8,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="max_step_reached",
full_name="communicator_objects.AgentInfoProto.max_step_reached",
index=8,
number=9,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="id",
full_name="communicator_objects.AgentInfoProto.id",
index=9,
number=10,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="action_mask",
full_name="communicator_objects.AgentInfoProto.action_mask",
index=10,
number=11,
type=8,
cpp_type=7,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="custom_observation",
full_name="communicator_objects.AgentInfoProto.custom_observation",
index=11,
number=12,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=145,
serialized_end=488,
_AGENTINFOPROTO.fields_by_name['custom_observation'].message_type = mlagents_dot_envs_dot_communicator__objects_dot_custom__observation__pb2._CUSTOMOBSERVATION
DESCRIPTOR.message_types_by_name['AgentInfoProto'] = _AGENTINFOPROTO
_AGENTINFOPROTO.fields_by_name[
"custom_observation"
].message_type = (
mlagents_dot_envs_dot_communicator__objects_dot_custom__observation__pb2._CUSTOMOBSERVATION
)
DESCRIPTOR.message_types_by_name["AgentInfoProto"] = _AGENTINFOPROTO
AgentInfoProto = _reflection.GeneratedProtocolMessageType('AgentInfoProto', (_message.Message,), dict(
DESCRIPTOR = _AGENTINFOPROTO,
__module__ = 'mlagents.envs.communicator_objects.agent_info_proto_pb2'
# @@protoc_insertion_point(class_scope:communicator_objects.AgentInfoProto)
))
AgentInfoProto = _reflection.GeneratedProtocolMessageType(
"AgentInfoProto",
(_message.Message,),
dict(
DESCRIPTOR=_AGENTINFOPROTO,
__module__="mlagents.envs.communicator_objects.agent_info_proto_pb2"
# @@protoc_insertion_point(class_scope:communicator_objects.AgentInfoProto)
),
)
_sym_db.RegisterMessage(AgentInfoProto)

ml-agents-envs/mlagents/envs/communicator_objects/brain_parameters_proto_pb2.py (298)


# source: mlagents/envs/communicator_objects/brain_parameters_proto.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from mlagents.envs.communicator_objects import resolution_proto_pb2 as mlagents_dot_envs_dot_communicator__objects_dot_resolution__proto__pb2
from mlagents.envs.communicator_objects import space_type_proto_pb2 as mlagents_dot_envs_dot_communicator__objects_dot_space__type__proto__pb2
from mlagents.envs.communicator_objects import (
resolution_proto_pb2 as mlagents_dot_envs_dot_communicator__objects_dot_resolution__proto__pb2,
)
from mlagents.envs.communicator_objects import (
space_type_proto_pb2 as mlagents_dot_envs_dot_communicator__objects_dot_space__type__proto__pb2,
)
name='mlagents/envs/communicator_objects/brain_parameters_proto.proto',
package='communicator_objects',
syntax='proto3',
serialized_options=_b('\252\002\034MLAgents.CommunicatorObjects'),
serialized_pb=_b('\n?mlagents/envs/communicator_objects/brain_parameters_proto.proto\x12\x14\x63ommunicator_objects\x1a\x39mlagents/envs/communicator_objects/resolution_proto.proto\x1a\x39mlagents/envs/communicator_objects/space_type_proto.proto\"\xd4\x02\n\x14\x42rainParametersProto\x12\x1f\n\x17vector_observation_size\x18\x01 \x01(\x05\x12\'\n\x1fnum_stacked_vector_observations\x18\x02 \x01(\x05\x12\x1a\n\x12vector_action_size\x18\x03 \x03(\x05\x12\x41\n\x12\x63\x61mera_resolutions\x18\x04 \x03(\x0b\x32%.communicator_objects.ResolutionProto\x12\"\n\x1avector_action_descriptions\x18\x05 \x03(\t\x12\x46\n\x18vector_action_space_type\x18\x06 \x01(\x0e\x32$.communicator_objects.SpaceTypeProto\x12\x12\n\nbrain_name\x18\x07 \x01(\t\x12\x13\n\x0bis_training\x18\x08 \x01(\x08\x42\x1f\xaa\x02\x1cMLAgents.CommunicatorObjectsb\x06proto3')
,
dependencies=[mlagents_dot_envs_dot_communicator__objects_dot_resolution__proto__pb2.DESCRIPTOR,mlagents_dot_envs_dot_communicator__objects_dot_space__type__proto__pb2.DESCRIPTOR,])
name="mlagents/envs/communicator_objects/brain_parameters_proto.proto",
package="communicator_objects",
syntax="proto3",
serialized_options=_b("\252\002\034MLAgents.CommunicatorObjects"),
serialized_pb=_b(
'\n?mlagents/envs/communicator_objects/brain_parameters_proto.proto\x12\x14\x63ommunicator_objects\x1a\x39mlagents/envs/communicator_objects/resolution_proto.proto\x1a\x39mlagents/envs/communicator_objects/space_type_proto.proto"\xd4\x02\n\x14\x42rainParametersProto\x12\x1f\n\x17vector_observation_size\x18\x01 \x01(\x05\x12\'\n\x1fnum_stacked_vector_observations\x18\x02 \x01(\x05\x12\x1a\n\x12vector_action_size\x18\x03 \x03(\x05\x12\x41\n\x12\x63\x61mera_resolutions\x18\x04 \x03(\x0b\x32%.communicator_objects.ResolutionProto\x12"\n\x1avector_action_descriptions\x18\x05 \x03(\t\x12\x46\n\x18vector_action_space_type\x18\x06 \x01(\x0e\x32$.communicator_objects.SpaceTypeProto\x12\x12\n\nbrain_name\x18\x07 \x01(\t\x12\x13\n\x0bis_training\x18\x08 \x01(\x08\x42\x1f\xaa\x02\x1cMLAgents.CommunicatorObjectsb\x06proto3'
),
dependencies=[
mlagents_dot_envs_dot_communicator__objects_dot_resolution__proto__pb2.DESCRIPTOR,
mlagents_dot_envs_dot_communicator__objects_dot_space__type__proto__pb2.DESCRIPTOR,
],
)
name='BrainParametersProto',
full_name='communicator_objects.BrainParametersProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='vector_observation_size', full_name='communicator_objects.BrainParametersProto.vector_observation_size', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_stacked_vector_observations', full_name='communicator_objects.BrainParametersProto.num_stacked_vector_observations', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='vector_action_size', full_name='communicator_objects.BrainParametersProto.vector_action_size', index=2,
number=3, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='camera_resolutions', full_name='communicator_objects.BrainParametersProto.camera_resolutions', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='vector_action_descriptions', full_name='communicator_objects.BrainParametersProto.vector_action_descriptions', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='vector_action_space_type', full_name='communicator_objects.BrainParametersProto.vector_action_space_type', index=5,
number=6, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='brain_name', full_name='communicator_objects.BrainParametersProto.brain_name', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_training', full_name='communicator_objects.BrainParametersProto.is_training', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=208,
serialized_end=548,
name="BrainParametersProto",
full_name="communicator_objects.BrainParametersProto",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="vector_observation_size",
full_name="communicator_objects.BrainParametersProto.vector_observation_size",
index=0,
number=1,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="num_stacked_vector_observations",
full_name="communicator_objects.BrainParametersProto.num_stacked_vector_observations",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(