
removed extraneous logging imports and loggers

/bug-failed-api-check
Anupam Bhatnagar 5 years ago
Current commit
f4dbedcf
18 files changed, 17 insertions and 79 deletions
  1. ml-agents-envs/mlagents_envs/communicator.py (4)
  2. ml-agents-envs/mlagents_envs/exception.py (5)
  3. ml-agents-envs/mlagents_envs/rpc_communicator.py (3)
  4. ml-agents-envs/mlagents_envs/rpc_utils.py (3)
  5. ml-agents/mlagents/logging_util.py (12)
  6. ml-agents/mlagents/trainers/brain.py (4)
  7. ml-agents/mlagents/trainers/components/reward_signals/gail/signal.py (3)
  8. ml-agents/mlagents/trainers/components/reward_signals/reward_signal_factory.py (4)
  9. ml-agents/mlagents/trainers/demo_loader.py (4)
  10. ml-agents/mlagents/trainers/ghost/trainer.py (7)
  11. ml-agents/mlagents/trainers/models.py (3)
  12. ml-agents/mlagents/trainers/policy/nn_policy.py (5)
  13. ml-agents/mlagents/trainers/policy/tf_policy.py (3)
  14. ml-agents/mlagents/trainers/ppo/optimizer.py (5)
  15. ml-agents/mlagents/trainers/sac/network.py (6)
  16. ml-agents/mlagents/trainers/sac/optimizer.py (14)
  17. ml-agents/mlagents/trainers/trainer/rl_trainer.py (3)
  18. ml-agents/mlagents/trainers/trainer/trainer.py (8)
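
Most of the deletions below repeat one pattern: a module-level import logging plus a getLogger call that nothing in the module actually uses. Where a module does log (the ghost trainer, the SAC optimizer, the base trainer class), the uppercase LOGGER name is renamed to lowercase logger. A rough before/after sketch of that pattern, using a made-up module and class name rather than any file from this commit:

# --- before: a dead module-level logger ---------------------------------------
import logging

logger = logging.getLogger("mlagents.trainers")  # defined, but nothing below uses it


class ExampleWorker:
    def step(self, value: int) -> int:
        return value + 1


# --- after: the unused import and logger are deleted; the class is untouched ---
class ExampleWorkerAfter:
    def step(self, value: int) -> int:
        return value + 1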

ml-agents-envs/mlagents_envs/communicator.py (4)

- import logging
- logger = logging.getLogger("mlagents_envs")
  class Communicator(object):

ml-agents-envs/mlagents_envs/exception.py (5)

- import logging
- logger = logging.getLogger("mlagents_envs")
  class UnityException(Exception):
      """
      Any error related to ml-agents environment.

ml-agents-envs/mlagents_envs/rpc_communicator.py (3)

- import logging
  import grpc
  from typing import Optional

  from mlagents_envs.communicator_objects.unity_input_pb2 import UnityInputProto
  from mlagents_envs.communicator_objects.unity_output_pb2 import UnityOutputProto
  from .exception import UnityTimeOutException, UnityWorkerInUseException
- logger = logging.getLogger("mlagents_envs")
  class UnityToExternalServicerImplementation(UnityToExternalProtoServicer):

ml-agents-envs/mlagents_envs/rpc_utils.py (3)

      NONE as COMPRESSION_NONE,
  )
  from mlagents_envs.communicator_objects.brain_parameters_pb2 import BrainParametersProto
- import logging
- logger = logging.getLogger("mlagents_envs")
  def agent_group_spec_from_proto(

ml-agents/mlagents/logging_util.py (12)

  def create_logger(name):
      log_level = logging.INFO
+     date_format = "%Y-%m-%d %H:%M:%S"
-     logging.basicConfig(level=log_level, format=log_format, datefmt="%Y-%m-%d %H:%M:%S")
+     logging.basicConfig(level=log_level, format=log_format, datefmt=date_format)
+ # TODO
+ """
+ 1. change mlagents.trainers to mlagents_trainers
+ 2. logger should be in lowercase in all files
+ 3. if logger is not used in a file then it should be deleted
+ """

ml-agents/mlagents/trainers/brain.py (4)

- import logging
- logger = logging.getLogger("mlagents.trainers")
  class CameraResolution(NamedTuple):

ml-agents/mlagents/trainers/components/reward_signals/gail/signal.py (3)

  from typing import Any, Dict, List
- import logging
  import numpy as np
  from mlagents.tf_utils import tf

  from mlagents.trainers.demo_loader import demo_to_buffer
- LOGGER = logging.getLogger("mlagents.trainers")
  class GAILRewardSignal(RewardSignal):

ml-agents/mlagents/trainers/components/reward_signals/reward_signal_factory.py (4)

- import logging
  from mlagents.trainers.exception import UnityTrainerException
  from mlagents.trainers.components.reward_signals import RewardSignal
  from mlagents.trainers.components.reward_signals.extrinsic.signal import (

      CuriosityRewardSignal,
  )
  from mlagents.trainers.policy.tf_policy import TFPolicy
- logger = logging.getLogger("mlagents.trainers")
  NAME_TO_CLASS: Dict[str, Type[RewardSignal]] = {

ml-agents/mlagents/trainers/demo_loader.py (4)

- import logging
  import os
  from typing import List, Tuple
  import numpy as np

  )
  from mlagents_envs.timers import timed, hierarchical_timer
  from google.protobuf.internal.decoder import _DecodeVarint32  # type: ignore
- logger = logging.getLogger("mlagents.trainers")
  @timed

ml-agents/mlagents/trainers/ghost/trainer.py (7)

  # # Unity ML-Agents Toolkit
  # ## ML-Agent Learning (Ghost Trainer)
- # import logging
  from typing import Deque, Dict, List, Any, cast
  import numpy as np

  from mlagents.trainers.trajectory import Trajectory
  from mlagents.trainers.agent_processor import AgentManagerQueue
- LOGGER = logging.getLogger("mlagents.trainers")
+ logger = logging.getLogger("mlagents.trainers")
  class GhostTrainer(Trainer):

          Saves training statistics to Tensorboard.
          """
          opponents = np.array(self.policy_elos, dtype=np.float32)
-         LOGGER.info(
+         logger.info(
              " Learning brain {} ELO: {:0.3f}\n"
              "Mean Opponent ELO: {:0.3f}"
              " Std Opponent ELO: {:0.3f}".format(

          x = "current"
          self.policy_elos[-1] = self.current_elo
          self.current_opponent = -1 if x == "current" else x
-         LOGGER.debug(
+         logger.debug(
              "Step {}: Swapping snapshot {} to id {} with {} learning".format(
                  self.get_step, x, name_behavior_id, self.learning_behavior_name
              )

ml-agents/mlagents/trainers/models.py (3)

- import logging
  from enum import Enum
  from typing import Callable, Dict, List, Tuple, NamedTuple

  from mlagents.trainers.exception import UnityTrainerException
  from mlagents.trainers.brain import CameraResolution
- logger = logging.getLogger("mlagents.trainers")
  ActivationFunction = Callable[[tf.Tensor], tf.Tensor]
  EncoderFunction = Callable[

ml-agents/mlagents/trainers/policy/nn_policy.py (5)

- import logging
  from mlagents_envs.timers import timed
  from mlagents_envs.base_env import BatchedStepResult
  from mlagents.trainers.brain import BrainParameters

      GaussianDistribution,
      MultiCategoricalDistribution,
  )
- logger = logging.getLogger("mlagents.trainers")
  EPSILON = 1e-6  # Small value to avoid divide by zero

ml-agents/mlagents/trainers/policy/tf_policy.py (3)

- import logging
  from typing import Any, Dict, List, Optional
  from mlagents_envs.exception import UnityException
  from mlagents.trainers.policy import Policy
  from mlagents.trainers.action_info import ActionInfo

ml-agents/mlagents/trainers/ppo/optimizer.py (5)

- import logging
  import numpy as np
  from mlagents.tf_utils import tf
  from mlagents_envs.timers import timed

  from mlagents.trainers.buffer import AgentBuffer
- logger = logging.getLogger("mlagents.trainers")
  class PPOOptimizer(TFOptimizer):

ml-agents/mlagents/trainers/sac/network.py (6)

- import logging
  from mlagents.trainers.models import ModelUtils, EncoderType
  LOG_STD_MAX = 2

  CONTINUOUS_TARGET_ENTROPY_SCALE = 1.0  # TODO: Make these an optional hyperparam.
- LOGGER = logging.getLogger("mlagents.trainers")
  POLICY_SCOPE = ""
  TARGET_SCOPE = "target_network"

ml-agents/mlagents/trainers/sac/optimizer.py (14)

  EPSILON = 1e-6  # Small value to avoid divide by zero
- LOGGER = logging.getLogger("mlagents.trainers")
+ logger = logging.getLogger("mlagents.trainers")
  POLICY_SCOPE = ""
  TARGET_SCOPE = "target_network"

                  self.target_network.value_vars, self.policy_network.value_vars
              )
          ]
-         LOGGER.debug("value_vars")
+         logger.debug("value_vars")
-         LOGGER.debug("targvalue_vars")
+         logger.debug("targvalue_vars")
-         LOGGER.debug("critic_vars")
+         logger.debug("critic_vars")
-         LOGGER.debug("q_vars")
+         logger.debug("q_vars")
-         LOGGER.debug("policy_vars")
+         logger.debug("policy_vars")
          policy_vars = self.policy.get_trainable_variables()
          self.print_all_vars(policy_vars)

      def print_all_vars(self, variables):
          for _var in variables:
-             LOGGER.debug(_var)
+             logger.debug(_var)

      @timed
      def update(self, batch: AgentBuffer, num_sequences: int) -> Dict[str, float]:
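
The print_all_vars helper and the debug calls above only produce output when the "mlagents.trainers" logger lets DEBUG records through. A minimal way to surface them from a driver script, assuming the logger keeps the "mlagents.trainers" name used in this hunk:

import logging

# Install a basic stderr handler, then lower the threshold for the trainers logger only.
logging.basicConfig(format="%(asctime)s %(name)s %(levelname)s: %(message)s")
logging.getLogger("mlagents.trainers").setLevel(logging.DEBUG)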

ml-agents/mlagents/trainers/trainer/rl_trainer.py (3)

  # # Unity ML-Agents Toolkit
- import logging
  from typing import Dict
  from collections import defaultdict

  from mlagents.trainers.exception import UnityTrainerException
  from mlagents.trainers.components.reward_signals import RewardSignalResult
- LOGGER = logging.getLogger("mlagents.trainers")
  RewardSignalResults = Dict[str, RewardSignalResult]

ml-agents/mlagents/trainers/trainer/trainer.py (8)

  from mlagents.trainers.exception import UnityTrainerException
  from mlagents_envs.timers import hierarchical_timer
- LOGGER = logging.getLogger("mlagents.trainers")
+ logger = logging.getLogger("mlagents.trainers")
  class Trainer(abc.ABC):

              s = sess.run(s_op)
              self.stats_reporter.write_text(s, self.get_step)
          except Exception:
-             LOGGER.info("Could not write text summary for Tensorboard.")
+             logger.info("Could not write text summary for Tensorboard.")
              pass
      def _dict_to_str(self, param_dict: Dict[str, Any], num_tabs: int) -> str:

              "Environment/Cumulative Reward"
          )
          if stats_summary.num > 0:
-             LOGGER.info(
+             logger.info(
                  "{}: {}: Step: {}. "
                  "Time Elapsed: {:0.3f} s "
                  "Mean "

              )
              set_gauge(f"{self.brain_name}.mean_reward", stats_summary.mean)
          else:
-             LOGGER.info(
+             logger.info(
                  " {}: {}: Step: {}. No episode was completed since last summary. {}".format(
                      self.run_id, self.brain_name, step, is_training
                  )
