
Turning some logger.info into logger.debug and removing some logging overhead when not using debug (#5211)

* Turning some logger.info into logger.debug and removing some logging overhead when not using debug

* Addressing comments

* Adding to changelog
GitHub · 3 years ago
Current commit 2e19759c
11 changed files, with 38 additions and 19 deletions
  1. com.unity.ml-agents/CHANGELOG.md (1 change)
  2. ml-agents-envs/mlagents_envs/environment.py (6 changes)
  3. ml-agents-envs/mlagents_envs/logging_util.py (27 changes)
  4. ml-agents/mlagents/torch_utils/torch.py (2 changes)
  5. ml-agents/mlagents/trainers/learn.py (2 changes)
  6. ml-agents/mlagents/trainers/policy/checkpoint_manager.py (4 changes)
  7. ml-agents/mlagents/trainers/sac/trainer.py (4 changes)
  8. ml-agents/mlagents/trainers/stats.py (5 changes)
  9. ml-agents/mlagents/trainers/subprocess_env_manager.py (2 changes)
  10. ml-agents/mlagents/trainers/torch/model_serialization.py (2 changes)
  11. ml-agents/mlagents/trainers/trainer_controller.py (2 changes)

com.unity.ml-agents/CHANGELOG.md (1 change)


- Modified the [GridWorld environment](https://github.com/Unity-Technologies/ml-agents/blob/main/docs/Learning-Environment-Examples.md#gridworld) to use the new `Goal Signal` feature. (#5193)
#### ml-agents / ml-agents-envs / gym-unity (Python)
+- Some console output has been moved from `info` to `debug` and will not be printed by default. If you want all messages to be printed, you can run `mlagents-learn` with the `--debug` option or add the line `debug: true` at the top of the yaml config file. (#5211)
### Bug Fixes
#### com.unity.ml-agents / com.unity.ml-agents.extensions (C#)
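For readers of the `--debug` entry above, here is a minimal sketch of the resulting behavior. It assumes `set_log_level`, `DEBUG`, and `INFO` are the public helpers in `mlagents_envs.logging_util`; only `get_logger` is visible in the hunks below, so treat the other names as assumptions.

```python
# Illustrative only: set_log_level/DEBUG/INFO are assumed helpers in
# mlagents_envs.logging_util; get_logger appears in the diff below.
from mlagents_envs.logging_util import DEBUG, INFO, get_logger, set_log_level

logger = get_logger(__name__)

set_log_level(INFO)   # assumed default when running mlagents-learn normally
logger.info("still printed at the default level")
logger.debug("moved to debug by this PR, so hidden by default")

set_log_level(DEBUG)  # roughly what --debug / `debug: true` switch on
logger.debug("now printed, with the verbose timestamped format")
```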

ml-agents-envs/mlagents_envs/environment.py (6 changes)


agent = agent_infos.value[0]
new_spec = behavior_spec_from_proto(brain_param, agent)
self._env_specs[brain_param.brain_name] = new_spec
logger.info(f"Connected new brain:\n{brain_param.brain_name}")
logger.info(f"Connected new brain: {brain_param.brain_name}")
def _update_state(self, output: UnityRLOutputProto) -> None:
"""

# Wait a bit for the process to shutdown, but kill it if it takes too long
try:
self._process.wait(timeout=timeout)
-logger.info(self._returncode_to_env_message(self._process.returncode))
+logger.debug(self._returncode_to_env_message(self._process.returncode))
-logger.info("Environment timed out shutting down. Killing...")
+logger.warning("Environment timed out shutting down. Killing...")
self._process.kill()
# Set to None so we don't try to close multiple times.
self._process = None

ml-agents-envs/mlagents_envs/logging_util.py (27 changes)


_loggers = set()
_log_level = NOTSET
DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
-LOG_FORMAT = "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
+DEBUG_LOG_FORMAT = "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
+LOG_FORMAT = "[%(levelname)s] %(message)s"
def get_logger(name: str) -> logging.Logger:

"""
logger = logging.getLogger(name=name)
+    # If we've already set the log level, make sure new loggers use it
+    if _log_level != NOTSET:
+        logger.setLevel(_log_level)
+    if _log_level == DEBUG:
+        formatter = logging.Formatter(fmt=DEBUG_LOG_FORMAT, datefmt=DATE_FORMAT)
+    else:
+        formatter = logging.Formatter(fmt=LOG_FORMAT)
-    formatter = logging.Formatter(fmt=LOG_FORMAT, datefmt=DATE_FORMAT)
-    # If we've already set the log level, make sure new loggers use it
-    if _log_level != NOTSET:
-        logger.setLevel(_log_level)
# Keep track of this logger so that we can change the log level later
_loggers.add(logger)
return logger

for logger in _loggers:
logger.setLevel(log_level)
+    if log_level == DEBUG:
+        formatter = logging.Formatter(fmt=DEBUG_LOG_FORMAT, datefmt=DATE_FORMAT)
+    else:
+        formatter = logging.Formatter(LOG_FORMAT)
+    _set_formatter_for_all_loggers(formatter)
+def _set_formatter_for_all_loggers(formatter: logging.Formatter) -> None:
+    for logger in _loggers:
+        for handler in logger.handlers[:]:
+            handler.setFormatter(formatter)
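The hunks above pick a lighter formatter whenever the log level is not `DEBUG`, which is where the "logging overhead" savings mentioned in the PR title come from. A self-contained sketch of the same pattern using only the standard library follows; the names here are illustrative, not the ml-agents API.

```python
import logging
import sys

DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
DEBUG_LOG_FORMAT = "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
LOG_FORMAT = "[%(levelname)s] %(message)s"


def make_logger(name: str, level: int = logging.INFO) -> logging.Logger:
    """Create a console logger whose formatter depends on the level."""
    logger = logging.getLogger(name)
    logger.setLevel(level)
    # Only pay for timestamp formatting (and the longer format string)
    # when running in debug mode.
    if level == logging.DEBUG:
        formatter = logging.Formatter(fmt=DEBUG_LOG_FORMAT, datefmt=DATE_FORMAT)
    else:
        formatter = logging.Formatter(fmt=LOG_FORMAT)
    handler = logging.StreamHandler(stream=sys.stdout)
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    return logger


log = make_logger("example")
log.info("hello")  # -> [INFO] hello
```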

ml-agents/mlagents/torch_utils/torch.py (2 changes)


torch.set_default_tensor_type(torch.cuda.FloatTensor)
else:
torch.set_default_tensor_type(torch.FloatTensor)
logger.info(f"default Torch device: {_device}")
logger.debug(f"default Torch device: {_device}")
# Initialize to default settings
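For context, a hedged sketch of the device-selection logic that surrounds this hunk. The real module drives this from the trainer's torch settings; `prefer_cuda` and the function name below are illustrative stand-ins, not the ml-agents API.

```python
import logging

import torch

logger = logging.getLogger(__name__)


def set_torch_defaults(prefer_cuda: bool = True) -> torch.device:
    """Pick a default device and a matching default tensor type (sketch)."""
    if prefer_cuda and torch.cuda.is_available():
        device = torch.device("cuda")
        torch.set_default_tensor_type(torch.cuda.FloatTensor)
    else:
        device = torch.device("cpu")
        torch.set_default_tensor_type(torch.FloatTensor)
    return device


_device = set_torch_defaults()
logger.debug(f"default Torch device: {_device}")  # now debug-only, as above
```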

ml-agents/mlagents/trainers/learn.py (2 changes)


if options.env_settings.seed == -1:
run_seed = np.random.randint(0, 10000)
logger.info(f"run_seed set to {run_seed}")
logger.debug(f"run_seed set to {run_seed}")
run_training(run_seed, options)

ml-agents/mlagents/trainers/policy/checkpoint_manager.py (4 changes)


file_path: str = checkpoint["file_path"]
if os.path.exists(file_path):
os.remove(file_path)
logger.info(f"Removed checkpoint model {file_path}.")
logger.debug(f"Removed checkpoint model {file_path}.")
logger.info(f"Checkpoint at {file_path} could not be found.")
logger.debug(f"Checkpoint at {file_path} could not be found.")
return
@classmethod
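The hunk above elides the branch structure around these two messages; presumably they sit on the two sides of an `os.path.exists` check, roughly as in this standalone sketch (the function name is illustrative, not the checkpoint manager's API).

```python
import logging
import os

logger = logging.getLogger(__name__)


def remove_checkpoint(file_path: str) -> None:
    """Delete a stale checkpoint file, logging either outcome at debug level."""
    if os.path.exists(file_path):
        os.remove(file_path)
        logger.debug(f"Removed checkpoint model {file_path}.")
    else:
        logger.debug(f"Checkpoint at {file_path} could not be found.")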

ml-agents/mlagents/trainers/sac/trainer.py (4 changes)


Loads the last saved replay buffer from a file.
"""
filename = os.path.join(self.artifact_path, "last_replay_buffer.hdf5")
logger.info(f"Loading Experience Replay Buffer from {filename}")
logger.info(f"Loading Experience Replay Buffer from {filename}...")
logger.info(
logger.debug(
"Experience replay buffer has {} experiences.".format(
self.update_buffer.num_experiences
)

ml-agents/mlagents/trainers/stats.py (5 changes)


for file_name in os.listdir(directory_name):
if file_name.startswith("events.out"):
logger.warning(
f"{file_name} was left over from a previous run. Deleting."
f"Deleting TensorBoard data {file_name} that was left over from a"
"previous run."
logger.warning(
logger.error(
"{} was left over from a previous run and "
"not deleted.".format(full_fname)
)
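The hunk also elides the `try`/`except` that separates the warning path from the error path. A standalone sketch of the cleanup loop follows, with illustrative names and an assumed `OSError` handler standing in for whatever exception the real code catches.

```python
import logging
import os

logger = logging.getLogger(__name__)


def delete_stale_event_files(directory_name: str) -> None:
    """Remove leftover TensorBoard event files from a previous run."""
    for file_name in os.listdir(directory_name):
        if file_name.startswith("events.out"):
            full_fname = os.path.join(directory_name, file_name)
            try:
                logger.warning(
                    f"Deleting TensorBoard data {file_name} that was left over "
                    "from a previous run."
                )
                os.remove(full_fname)
            except OSError:
                logger.error(
                    "{} was left over from a previous run and "
                    "not deleted.".format(full_fname)
                )
```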

ml-agents/mlagents/trainers/subprocess_env_manager.py (2 changes)


UnityEnvironmentException,
UnityCommunicatorStoppedException,
) as ex:
logger.info(f"UnityEnvironment worker {worker_id}: environment stopping.")
logger.debug(f"UnityEnvironment worker {worker_id}: environment stopping.")
step_queue.put(
EnvironmentResponse(EnvironmentCommand.ENV_EXITED, worker_id, ex)
)

ml-agents/mlagents/trainers/torch/model_serialization.py (2 changes)


:param output_filepath: file path to output the model (without file suffix)
"""
onnx_output_path = f"{output_filepath}.onnx"
logger.info(f"Converting to {onnx_output_path}")
logger.debug(f"Converting to {onnx_output_path}")
with exporting_to_onnx():
torch.onnx.export(
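For reference, a minimal self-contained `torch.onnx.export` call of the same shape as the truncated one above, using a toy model and an illustrative output path rather than the trainer's serialized policy.

```python
import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(4, 16), nn.ReLU(), nn.Linear(16, 2))
dummy_input = torch.zeros(1, 4)
onnx_output_path = "toy_policy.onnx"  # illustrative path, not the trainer's

torch.onnx.export(model, dummy_input, onnx_output_path, opset_version=9)
```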

ml-agents/mlagents/trainers/trainer_controller.py (2 changes)


for brain_name in self.trainers.keys():
self.trainers[brain_name].save_model()
-self.logger.info("Saved Model")
+self.logger.debug("Saved Model")
@staticmethod
def _create_output_path(output_path):
