
Use a class for camera res, not dict (#2656)

Branch: develop-gpu-test
Committed by GitHub, 5 years ago
Commit 68965c7b
4 files changed: 31 insertions and 23 deletions
  1. gym-unity/gym_unity/envs/__init__.py (10 changed lines)
  2. ml-agents-envs/mlagents/envs/brain.py (23 changed lines)
  3. ml-agents/mlagents/trainers/models.py (18 changed lines)
  4. ml-agents/mlagents/trainers/tests/mock_brain.py (3 changed lines)

gym-unity/gym_unity/envs/__init__.py (10 changed lines)


 high = np.array([np.inf] * brain.vector_observation_space_size)
 self.action_meanings = brain.vector_action_descriptions
 if self.use_visual:
-    if brain.camera_resolutions[0]["blackAndWhite"]:
-        depth = 1
-    else:
-        depth = 3
     ...
-            brain.camera_resolutions[0]["height"],
-            brain.camera_resolutions[0]["width"],
-            depth,
+            brain.camera_resolutions[0].height,
+            brain.camera_resolutions[0].width,
+            brain.camera_resolutions[0].num_channels,
         ),
     )
 else:
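
For orientation, a minimal sketch (not the repository's exact wrapper code) of what the new shape computation amounts to. It assumes gym's spaces.Box and inlines a CameraResolution tuple equivalent to the one the brain.py change below introduces.

    # Minimal sketch, assuming gym is installed (as gym-unity requires):
    # the observation-space shape now comes straight from CameraResolution
    # attributes instead of dict lookups plus a blackAndWhite/depth branch.
    from typing import NamedTuple

    import numpy as np
    from gym import spaces

    class CameraResolution(NamedTuple):
        height: int
        width: int
        gray_scale: bool

        @property
        def num_channels(self) -> int:
            return 1 if self.gray_scale else 3

    res = CameraResolution(height=84, width=84, gray_scale=False)
    observation_space = spaces.Box(
        0, 1, dtype=np.float32, shape=(res.height, res.width, res.num_channels)
    )
    print(observation_space.shape)  # (84, 84, 3)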

ml-agents-envs/mlagents/envs/brain.py (23 changed lines)


 from mlagents.envs.communicator_objects.agent_info_pb2 import AgentInfoProto
 from mlagents.envs.communicator_objects.brain_parameters_pb2 import BrainParametersProto
 from mlagents.envs.timers import hierarchical_timer, timed
-from typing import Dict, List, Optional
+from typing import Dict, List, NamedTuple, Optional
 ...
+class CameraResolution(NamedTuple):
+    height: int
+    width: int
+    gray_scale: bool
+
+    @property
+    def num_channels(self) -> int:
+        return 1 if self.gray_scale else 3
+
+    @staticmethod
+    def from_proto(p):
+        return CameraResolution(
+            height=p.height, width=p.width, gray_scale=p.gray_scale
+        )

 class BrainParameters:
     def __init__(
         self,
         ...
-        camera_resolutions: List[Dict],
+        camera_resolutions: List[CameraResolution],
         vector_action_space_size: List[int],
         vector_action_descriptions: List[str],
         vector_action_space_type: int,
 ...
         :return: BrainParameter object.
         """
         resolution = [
-            {"height": x.height, "width": x.width, "blackAndWhite": x.gray_scale}
-            for x in brain_param_proto.camera_resolutions
+            CameraResolution.from_proto(x) for x in brain_param_proto.camera_resolutions
         ]
         brain_params = BrainParameters(
             brain_param_proto.brain_name,
 ...
         obs = [
             BrainInfo.process_pixels(
                 x.visual_observations[i],
-                brain_params.camera_resolutions[i]["blackAndWhite"],
+                brain_params.camera_resolutions[i].gray_scale,
             )
             for x in agent_info_list
         ]
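
A quick usage sketch for the new NamedTuple. FakeResolutionProto below is a hypothetical stand-in for the protobuf resolution message; from_proto only reads its height, width, and gray_scale fields.

    # Usage sketch, assuming this commit's mlagents package is importable.
    from collections import namedtuple

    from mlagents.envs.brain import CameraResolution

    # Hypothetical stand-in for the proto message passed to from_proto.
    FakeResolutionProto = namedtuple(
        "FakeResolutionProto", ["height", "width", "gray_scale"]
    )

    res = CameraResolution.from_proto(FakeResolutionProto(84, 84, True))
    assert res == CameraResolution(height=84, width=84, gray_scale=True)  # tuple equality
    assert res.num_channels == 1  # gray_scale=True -> single channel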

ml-agents/mlagents/trainers/models.py (18 changed lines)


 import logging
 from enum import Enum
-from typing import Any, Callable, Dict, List
+from typing import Callable, List

 import numpy as np
 import tensorflow as tf

+from mlagents.envs.brain import CameraResolution
+
 logger = logging.getLogger("mlagents.trainers")
 ...
         return tf.multiply(input_activation, tf.nn.sigmoid(input_activation))

     @staticmethod
-    def create_visual_input(camera_parameters: Dict[str, Any], name: str) -> tf.Tensor:
+    def create_visual_input(
+        camera_parameters: CameraResolution, name: str
+    ) -> tf.Tensor:
         """
         Creates image input op.
         :param camera_parameters: Parameters for visual observation from BrainInfo.
         ...
-        o_size_h = camera_parameters["height"]
-        o_size_w = camera_parameters["width"]
-        bw = camera_parameters["blackAndWhite"]
-        if bw:
-            c_channels = 1
-        else:
-            c_channels = 3
+        o_size_h = camera_parameters.height
+        o_size_w = camera_parameters.width
+        c_channels = camera_parameters.num_channels
         visual_in = tf.placeholder(
             shape=[None, o_size_h, o_size_w, c_channels], dtype=tf.float32, name=name
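
The updated helper can be exercised in isolation roughly as below. This is a sketch under two assumptions: the TF1 graph mode that tf.placeholder implies, and that the method returns the placeholder (the return statement and the enclosing trainer class sit outside the displayed hunk). The placeholder name used in the call is arbitrary.

    # Standalone sketch mirroring the new attribute-based body (TF1 only).
    import tensorflow as tf

    from mlagents.envs.brain import CameraResolution

    def create_visual_input(camera_parameters: CameraResolution, name: str) -> tf.Tensor:
        # Attribute access replaces the old dict lookups and blackAndWhite branch.
        visual_in = tf.placeholder(
            shape=[
                None,
                camera_parameters.height,
                camera_parameters.width,
                camera_parameters.num_channels,
            ],
            dtype=tf.float32,
            name=name,
        )
        return visual_in

    vis = create_visual_input(CameraResolution(84, 84, gray_scale=False), "visual_observation_0")
    print(vis.shape)  # (?, 84, 84, 3) in a TF1 graph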

ml-agents/mlagents/trainers/tests/mock_brain.py (3 changed lines)


 import unittest.mock as mock
 import numpy as np

+from mlagents.envs.brain import CameraResolution
 from mlagents.trainers.buffer import Buffer
 ...
     mock_brain.return_value.vector_observation_space_size = (
         vector_observation_space_size
     )
-    camrez = {"blackAndWhite": False, "height": 84, "width": 84}
+    camrez = CameraResolution(height=84, width=84, gray_scale=False)
     mock_brain.return_value.camera_resolutions = [camrez] * number_visual_observations
     mock_brain.return_value.vector_action_space_size = vector_action_space_size
     mock_brain.return_value.brain_name = "MockBrain"
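
A small sanity check of the fixture change (a sketch, assuming this commit's mlagents package is importable): the mock brain's camera_resolutions is now a list of CameraResolution tuples, so test code indexes it and uses attribute access exactly like production code.

    from mlagents.envs.brain import CameraResolution

    camrez = CameraResolution(height=84, width=84, gray_scale=False)
    camera_resolutions = [camrez] * 2  # e.g. number_visual_observations = 2

    assert camera_resolutions[0].num_channels == 3  # color camera -> 3 channels
    assert camera_resolutions[1] is camrez  # list multiplication reuses the same tuple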
