Merge pull request #1934 from Unity-Technologies/develop-black
Black formatting
Branch: develop-generalizationTraining-TrainerController
GitHub
6 years ago
Current commit
70d14910
74 files changed, with 6023 insertions and 3767 deletions
Lines changed  File
9    .circleci/config.yml
5    CONTRIBUTING.md
131  gym-unity/gym_unity/envs/unity_env.py
67   gym-unity/gym_unity/tests/test_gym.py
21   gym-unity/setup.py
4    ml-agents-envs/mlagents/envs/base_unity_environment.py
141  ml-agents-envs/mlagents/envs/brain.py
1    ml-agents-envs/mlagents/envs/communicator.py
212  ml-agents-envs/mlagents/envs/communicator_objects/agent_action_proto_pb2.py
387  ml-agents-envs/mlagents/envs/communicator_objects/agent_info_proto_pb2.py
298  ml-agents-envs/mlagents/envs/communicator_objects/brain_parameters_proto_pb2.py
61   ml-agents-envs/mlagents/envs/communicator_objects/command_proto_pb2.py
72   ml-agents-envs/mlagents/envs/communicator_objects/custom_action_pb2.py
72   ml-agents-envs/mlagents/envs/communicator_objects/custom_observation_pb2.py
72   ml-agents-envs/mlagents/envs/communicator_objects/custom_reset_parameters_pb2.py
198  ml-agents-envs/mlagents/envs/communicator_objects/demonstration_meta_proto_pb2.py
223  ml-agents-envs/mlagents/envs/communicator_objects/engine_configuration_proto_pb2.py
250  ml-agents-envs/mlagents/envs/communicator_objects/environment_parameters_proto_pb2.py
123  ml-agents-envs/mlagents/envs/communicator_objects/header_pb2.py
148  ml-agents-envs/mlagents/envs/communicator_objects/resolution_proto_pb2.py
62   ml-agents-envs/mlagents/envs/communicator_objects/space_type_proto_pb2.py
148  ml-agents-envs/mlagents/envs/communicator_objects/unity_input_pb2.py
182  ml-agents-envs/mlagents/envs/communicator_objects/unity_message_pb2.py
148  ml-agents-envs/mlagents/envs/communicator_objects/unity_output_pb2.py
100  ml-agents-envs/mlagents/envs/communicator_objects/unity_rl_initialization_input_pb2.py
225  ml-agents-envs/mlagents/envs/communicator_objects/unity_rl_initialization_output_pb2.py
398  ml-agents-envs/mlagents/envs/communicator_objects/unity_rl_input_pb2.py
326  ml-agents-envs/mlagents/envs/communicator_objects/unity_rl_output_pb2.py
62   ml-agents-envs/mlagents/envs/communicator_objects/unity_to_external_pb2.py
55   ml-agents-envs/mlagents/envs/communicator_objects/unity_to_external_pb2_grpc.py
358  ml-agents-envs/mlagents/envs/environment.py
39   ml-agents-envs/mlagents/envs/exception.py
63   ml-agents-envs/mlagents/envs/mock_communicator.py
10   ml-agents-envs/mlagents/envs/rpc_communicator.py
17   ml-agents-envs/mlagents/envs/socket_communicator.py
88   ml-agents-envs/mlagents/envs/subprocess_environment.py
124  ml-agents-envs/mlagents/envs/tests/test_envs.py
1    ml-agents-envs/mlagents/envs/tests/test_rpc_communicator.py
60   ml-agents-envs/mlagents/envs/tests/test_subprocess_unity_environment.py
38   ml-agents-envs/setup.py
2    ml-agents/mlagents/trainers/__init__.py
335  ml-agents/mlagents/trainers/barracuda.py
100  ml-agents/mlagents/trainers/bc/models.py
58   ml-agents/mlagents/trainers/bc/offline_trainer.py
111  ml-agents/mlagents/trainers/bc/online_trainer.py
61   ml-agents/mlagents/trainers/bc/policy.py
53   ml-agents/mlagents/trainers/bc/trainer.py
73   ml-agents/mlagents/trainers/buffer.py
67   ml-agents/mlagents/trainers/curriculum.py
44   ml-agents/mlagents/trainers/demo_loader.py
5    ml-agents/mlagents/trainers/exception.py
210  ml-agents/mlagents/trainers/learn.py
56   ml-agents/mlagents/trainers/meta_curriculum.py
464  ml-agents/mlagents/trainers/models.py
83   ml-agents/mlagents/trainers/policy.py
205  ml-agents/mlagents/trainers/ppo/models.py
179  ml-agents/mlagents/trainers/ppo/policy.py
321  ml-agents/mlagents/trainers/ppo/trainer.py
877  ml-agents/mlagents/trainers/tensorflow_to_barracuda.py
12   ml-agents/mlagents/trainers/tests/test_barracuda_converter.py
110  ml-agents/mlagents/trainers/tests/test_bc.py
62   ml-agents/mlagents/trainers/tests/test_buffer.py
31   ml-agents/mlagents/trainers/tests/test_curriculum.py
12   ml-agents/mlagents/trainers/tests/test_demo_loader.py
60   ml-agents/mlagents/trainers/tests/test_learn.py
98   ml-agents/mlagents/trainers/tests/test_meta_curriculum.py
31   ml-agents/mlagents/trainers/tests/test_policy.py
391  ml-agents/mlagents/trainers/tests/test_ppo.py
206  ml-agents/mlagents/trainers/tests/test_trainer_controller.py
47   ml-agents/mlagents/trainers/tests/test_trainer_metrics.py
117  ml-agents/mlagents/trainers/trainer.py
195  ml-agents/mlagents/trainers/trainer_controller.py
53   ml-agents/mlagents/trainers/trainer_metrics.py
62   ml-agents/setup.py
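
Every change in the list above is mechanical: the PR reruns the Black code formatter over the Python packages, normalizing string quotes and line wrapping without changing behaviour. As a hedged illustration (not part of the PR), the same kind of rewrite can be reproduced through Black's Python API; format_str and FileMode are assumed to be available in the installed black package:

import black

# Illustrative source in the pre-PR style: single quotes, hand-wrapped list.
source = (
    "field_names = ['Brain name', 'Time to update policy',\n"
    "               'Time since start of training']\n"
)

# Black normalizes string quotes to double quotes and re-wraps lines that
# exceed its line length, which is the pattern visible in the diffs below.
print(black.format_str(source, mode=black.FileMode()))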
55   ml-agents-envs/mlagents/envs/communicator_objects/unity_to_external_pb2_grpc.py

  # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
  import grpc

- from mlagents.envs.communicator_objects import unity_message_pb2 as mlagents_dot_envs_dot_communicator__objects_dot_unity__message__pb2
+ from mlagents.envs.communicator_objects import (
+     unity_message_pb2 as mlagents_dot_envs_dot_communicator__objects_dot_unity__message__pb2,
+ )

  # missing associated documentation comment in .proto file
  pass

  def __init__(self, channel):
      """Constructor.

  self.Exchange = channel.unary_unary(
-     '/communicator_objects.UnityToExternal/Exchange',
+     "/communicator_objects.UnityToExternal/Exchange",
      request_serializer=mlagents_dot_envs_dot_communicator__objects_dot_unity__message__pb2.UnityMessage.SerializeToString,
      response_deserializer=mlagents_dot_envs_dot_communicator__objects_dot_unity__message__pb2.UnityMessage.FromString,

  # missing associated documentation comment in .proto file
  pass

  def Exchange(self, request, context):
      """Sends the academy parameters

  context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
+ context.set_details("Method not implemented!")
+ raise NotImplementedError("Method not implemented!")

  rpc_method_handlers = {
-     'Exchange': grpc.unary_unary_rpc_method_handler(
+     "Exchange": grpc.unary_unary_rpc_method_handler(
          servicer.Exchange,
          request_deserializer=mlagents_dot_envs_dot_communicator__objects_dot_unity__message__pb2.UnityMessage.FromString,
          response_serializer=mlagents_dot_envs_dot_communicator__objects_dot_unity__message__pb2.UnityMessage.SerializeToString,
-     ),
+     )
  }
  generic_handler = grpc.method_handlers_generic_handler(
-     'communicator_objects.UnityToExternal', rpc_method_handlers)
+     "communicator_objects.UnityToExternal", rpc_method_handlers
+ )
  server.add_generic_rpc_handlers((generic_handler,))
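
The file above is the generated gRPC glue for the communicator_objects.UnityToExternal service; only string quoting and call wrapping change. As a hedged sketch of how such a stub is typically driven from Python (the UnityToExternalStub class name, address, and port are assumptions for illustration; only the Exchange RPC and the UnityMessage type appear in the diff):

import grpc

from mlagents.envs.communicator_objects import unity_message_pb2
from mlagents.envs.communicator_objects import unity_to_external_pb2_grpc

# Hypothetical address and port, used here purely for illustration.
channel = grpc.insecure_channel("localhost:5005")
stub = unity_to_external_pb2_grpc.UnityToExternalStub(channel)  # assumed generated class name

# Exchange is the unary-unary RPC registered above: one UnityMessage in, one UnityMessage out.
reply = stub.Exchange(unity_message_pb2.UnityMessage())
print(reply)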
877  ml-agents/mlagents/trainers/tensorflow_to_barracuda.py
The diff for this file is too large to display.
47   ml-agents/mlagents/trainers/tests/test_trainer_metrics.py

  import unittest.mock as mock
  from mlagents.trainers import TrainerMetrics

  class TestTrainerMetrics:
-     field_names = ['Brain name', 'Time to update policy',
-                    'Time since start of training',
-                    'Time for last experience collection',
-                    'Number of experiences used for training', 'Mean return']
+     field_names = [
+         "Brain name",
+         "Time to update policy",
+         "Time since start of training",
+         "Time for last experience collection",
+         "Number of experiences used for training",
+         "Mean return",
+     ]

-     @mock.patch('mlagents.trainers.trainer_metrics.time', mock.MagicMock(return_value=42))
+     @mock.patch(
+         "mlagents.trainers.trainer_metrics.time", mock.MagicMock(return_value=42)
+     )
-         mock_path = 'fake'
-         mock_brain_name = 'fake'
-         trainer_metrics = TrainerMetrics(path=mock_path,
-                                          brain_name=mock_brain_name)
+         mock_path = "fake"
+         mock_brain_name = "fake"
+         trainer_metrics = TrainerMetrics(path=mock_path, brain_name=mock_brain_name)

-     @mock.patch('mlagents.trainers.trainer_metrics.time', mock.MagicMock(return_value=42))
+     @mock.patch(
+         "mlagents.trainers.trainer_metrics.time", mock.MagicMock(return_value=42)
+     )
-         mock_path = 'fake'
-         mock_brain_name = 'fake'
+         mock_path = "fake"
+         mock_brain_name = "fake"
          fake_buffer_length = 350
-         trainer_metrics = TrainerMetrics(path=mock_path,
-                                          brain_name=mock_brain_name)
+         trainer_metrics = TrainerMetrics(path=mock_path, brain_name=mock_brain_name)
-         trainer_metrics.start_policy_update_timer(number_experiences=fake_buffer_length,
-                                                   mean_return=fake_mean_return)
+         trainer_metrics.start_policy_update_timer(
+             number_experiences=fake_buffer_length, mean_return=fake_mean_return
+         )
-         fake_row = [mock_brain_name, 0,0, 0, 350, '0.300']
+         fake_row = [mock_brain_name, 0, 0, 0, 350, "0.300"]
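
The test file above changes only quoting and wrapping; the TrainerMetrics calls it exercises stay the same. A minimal usage sketch restricted to what the diff shows (the path, brain name, and numeric values here are illustrative placeholders, not taken from the PR):

from mlagents.trainers import TrainerMetrics

# Construct the metrics recorder the same way the test does.
metrics = TrainerMetrics(path="metrics.csv", brain_name="ExampleBrain")

# Mark the start of a policy update, recording how much experience feeds it
# and the current mean return; per field_names these become columns of a CSV row.
metrics.start_policy_update_timer(number_experiences=350, mean_return=0.3)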