
Fix bug and update tests (#850)

/develop-generalizationTraining-TrainerController
GitHub · 7 years ago
Current commit: a720e370
6 changed files with 27 additions and 26 deletions
  1. python/tests/mock_communicator.py (15 changed lines)
  2. python/tests/test_bc.py (14 changed lines)
  3. python/tests/test_ppo.py (7 changed lines)
  4. python/tests/test_unityagents.py (8 changed lines)
  5. python/tests/test_unitytrainers.py (6 changed lines)
  6. python/unitytrainers/models.py (3 changed lines)

python/tests/mock_communicator.py (15 changed lines)


 class MockCommunicator(Communicator):
-    def __init__(self, discrete=False, visual_input=False):
+    def __init__(self, discrete=False, visual_inputs=0):
         """
         Python side of the grpc communication. Python is the client and Unity the server

         self.is_discrete = discrete
         self.steps = 0
-        self.visual_input = visual_input
-        if self.visual_input:
-            resolutions = [ResolutionProto(
-                width=30,
-                height=40,
-                gray_scale=False)]
-        else:
-            resolutions = []
+        self.visual_inputs = visual_inputs
+        resolutions = [ResolutionProto(
+            width=30,
+            height=40,
+            gray_scale=False) for i in range(self.visual_inputs)]
         bp = BrainParametersProto(
             vector_observation_size=3,
             num_stacked_vector_observations=2,
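The constructor change replaces a true/false visual flag with a count, so a test can describe a brain with any number of mocked cameras. A minimal, self-contained sketch of the behavior difference (plain dicts stand in for ResolutionProto; the helper names are illustrative, not part of the mock):

# Sketch: the old boolean flag could only describe zero or one camera;
# the new integer count builds one 30x40 RGB resolution per requested input.
def make_resolutions_old(visual_input):
    return [dict(width=30, height=40, gray_scale=False)] if visual_input else []

def make_resolutions_new(visual_inputs):
    return [dict(width=30, height=40, gray_scale=False)
            for _ in range(visual_inputs)]

assert make_resolutions_old(True) == make_resolutions_new(1)
assert len(make_resolutions_new(2)) == 2   # two mocked cameras, not expressible before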

python/tests/test_bc.py (14 changed lines)


     with tf.Session() as sess:
         with tf.variable_scope("FakeGraphScope"):
             mock_communicator.return_value = MockCommunicator(
-                discrete=False, visual_input=False)
+                discrete=False, visual_inputs=0)
             env = UnityEnvironment(' ')
             model = BehavioralCloningModel(env.brains["RealFakeBrain"])
             init = tf.global_variables_initializer()

     with tf.Session() as sess:
         with tf.variable_scope("FakeGraphScope"):
             mock_communicator.return_value = MockCommunicator(
-                discrete=True, visual_input=False)
+                discrete=True, visual_inputs=0)
             env = UnityEnvironment(' ')
             model = BehavioralCloningModel(env.brains["RealFakeBrain"])
             init = tf.global_variables_initializer()

     with tf.Session() as sess:
         with tf.variable_scope("FakeGraphScope"):
             mock_communicator.return_value = MockCommunicator(
-                discrete=True, visual_input=True)
+                discrete=True, visual_inputs=2)
             env = UnityEnvironment(' ')
             model = BehavioralCloningModel(env.brains["RealFakeBrain"])
             init = tf.global_variables_initializer()

                 model.sequence_length: 1,
                 model.vector_in: np.array([[1, 2, 3, 1, 2, 3],
                                            [3, 4, 5, 3, 4, 5]]),
-                model.visual_in[0]: np.ones([2, 40, 30, 3])}
+                model.visual_in[0]: np.ones([2, 40, 30, 3]),
+                model.visual_in[1]: np.ones([2, 40, 30, 3])}
             sess.run(run_list, feed_dict=feed_dict)
             env.close()

     with tf.Session() as sess:
         with tf.variable_scope("FakeGraphScope"):
             mock_communicator.return_value = MockCommunicator(
-                discrete=False, visual_input=True)
+                discrete=False, visual_inputs=2)
             env = UnityEnvironment(' ')
             model = BehavioralCloningModel(env.brains["RealFakeBrain"])
             init = tf.global_variables_initializer()

                 model.sequence_length: 1,
                 model.vector_in: np.array([[1, 2, 3, 1, 2, 3],
                                            [3, 4, 5, 3, 4, 5]]),
-                model.visual_in[0]: np.ones([2, 40, 30, 3])}
+                model.visual_in[0]: np.ones([2, 40, 30, 3]),
+                model.visual_in[1]: np.ones([2, 40, 30, 3])}
             sess.run(run_list, feed_dict=feed_dict)
             env.close()
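With visual_inputs=2 the mocked brain reports two camera resolutions, so the model builds two visual placeholders and the test has to feed both. A rough sketch of the shape convention these feeds follow (the [2, 40, 30, 3] layout is batch, height, width, channels, matching the mock's 30x40 RGB resolution; the variable names below are illustrative):

import numpy as np

batch_size = 2
height, width, channels = 40, 30, 3          # mock resolution: width=30, height=40, RGB
fake_cameras = [np.ones([batch_size, height, width, channels]) for _ in range(2)]
# Each entry would be fed to the matching placeholder, e.g.
#     feed_dict[model.visual_in[k]] = fake_cameras[k]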

python/tests/test_ppo.py (7 changed lines)


     with tf.Session() as sess:
         with tf.variable_scope("FakeGraphScope"):
             mock_communicator.return_value = MockCommunicator(
-                discrete=False, visual_input=False)
+                discrete=False, visual_inputs=0)
             env = UnityEnvironment(' ')
             model = PPOModel(env.brains["RealFakeBrain"])

     with tf.Session() as sess:
         with tf.variable_scope("FakeGraphScope"):
             mock_communicator.return_value = MockCommunicator(
-                discrete=True, visual_input=True)
+                discrete=True, visual_inputs=2)
             env = UnityEnvironment(' ')
             model = PPOModel(env.brains["RealFakeBrain"])
             init = tf.global_variables_initializer()

                 model.sequence_length: 1,
                 model.vector_in: np.array([[1, 2, 3, 1, 2, 3],
                                            [3, 4, 5, 3, 4, 5]]),
-                model.visual_in[0]: np.ones([2, 40, 30, 3])
+                model.visual_in[0]: np.ones([2, 40, 30, 3]),
+                model.visual_in[1]: np.ones([2, 40, 30, 3])
                 }
             sess.run(run_list, feed_dict=feed_dict)
             env.close()

python/tests/test_unityagents.py (8 changed lines)


 @mock.patch('unityagents.UnityEnvironment.get_communicator')
 def test_initialization(mock_communicator, mock_launcher):
     mock_communicator.return_value = MockCommunicator(
-        discrete=False, visual_input=False)
+        discrete=False, visual_inputs=0)
     env = UnityEnvironment(' ')
     with pytest.raises(UnityActionException):
         env.step([0])

 @mock.patch('unityagents.UnityEnvironment.get_communicator')
 def test_reset(mock_communicator, mock_launcher):
     mock_communicator.return_value = MockCommunicator(
-        discrete=False, visual_input=False)
+        discrete=False, visual_inputs=0)
     env = UnityEnvironment(' ')
     brain = env.brains['RealFakeBrain']
     brain_info = env.reset()

 @mock.patch('unityagents.UnityEnvironment.get_communicator')
 def test_step(mock_communicator, mock_launcher):
     mock_communicator.return_value = MockCommunicator(
-        discrete=False, visual_input=False)
+        discrete=False, visual_inputs=0)
     env = UnityEnvironment(' ')
     brain = env.brains['RealFakeBrain']
     brain_info = env.reset()

 @mock.patch('unityagents.UnityEnvironment.get_communicator')
 def test_close(mock_communicator, mock_launcher):
     comm = MockCommunicator(
-        discrete=False, visual_input=False)
+        discrete=False, visual_inputs=0)
     mock_communicator.return_value = comm
     env = UnityEnvironment(' ')
     assert env._loaded

python/tests/test_unitytrainers.py (6 changed lines)


 @mock.patch('unityagents.UnityEnvironment.get_communicator')
 def test_initialization(mock_communicator, mock_launcher):
     mock_communicator.return_value = MockCommunicator(
-        discrete=True, visual_input=True)
+        discrete=True, visual_inputs=1)
     tc = TrainerController(' ', ' ', 1, None, True, True, False, 1,
                            1, 1, 1, '', "tests/test_unitytrainers.py", False)
     assert(tc.env.brain_names[0] == 'RealFakeBrain')

     with mock.patch(open_name, create=True) as _:
         mock_load.return_value = dummy_config
         mock_communicator.return_value = MockCommunicator(
-            discrete=True, visual_input=True)
+            discrete=True, visual_inputs=1)
         mock_load.return_value = dummy_config
         tc = TrainerController(' ', ' ', 1, None, True, True, False, 1,
                                1, 1, 1, '','', False)

     with mock.patch('yaml.load') as mock_load:
         with mock.patch(open_name, create=True) as _:
             mock_communicator.return_value = MockCommunicator(
-                discrete=True, visual_input=True)
+                discrete=True, visual_inputs=1)
             tc = TrainerController(' ', ' ', 1, None, True, True, False, 1,
                                    1, 1, 1, '', "tests/test_unitytrainers.py", False)

python/unitytrainers/models.py (3 changed lines)


             for j in range(brain.number_visual_observations):
                 encoded_visual = self.create_visual_observation_encoder(self.visual_in[j], h_size,
                                                                         activation_fn, num_layers,
-                                                                        "main_graph_{}".format(i), False)
+                                                                        "main_graph_{}_encoder{}"
+                                                                        .format(i, j), False)
                 visual_encoders.append(encoded_visual)
             hidden_visual = tf.concat(visual_encoders, axis=1)
             if brain.vector_observation_space_size > 0:
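This last hunk is the bug the commit title refers to: every visual-observation encoder for stream i was built inside the same variable scope, "main_graph_{i}", so a brain with two or more cameras tried to create the same variables twice. Appending the observation index j gives each encoder its own scope. A minimal TF 1.x sketch of the failure mode, assuming (as the real encoder appears to) fixed variable names inside the scope it is given:

import tensorflow as tf   # TF 1.x, as used by this version of the codebase

def encoder(x, scope):
    # Stand-in for create_visual_observation_encoder: fixed layer name inside `scope`.
    with tf.variable_scope(scope):
        return tf.layers.dense(x, 16, name="hidden")

x = tf.placeholder(tf.float32, [None, 8])
a = encoder(x, "main_graph_0_encoder0")   # unique scope per visual observation: fine
b = encoder(x, "main_graph_0_encoder1")
# Calling encoder(x, "main_graph_0") twice instead would raise a
# "Variable main_graph_0/hidden/kernel already exists" ValueError unless reuse is set.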
