浏览代码

add flake8 to precommit

/develop-generalizationTraining-TrainerController
Chris Elion 5 年前
当前提交
bb7773c1
共有 12 个文件被更改,包括 38 次插入和 35 次删除
  1. 5
      .pre-commit-config.yaml
  2. 12
      ml-agents-envs/mlagents/envs/exception.py
  3. 2
      ml-agents-envs/mlagents/envs/mock_communicator.py
  4. 2
      ml-agents-envs/mlagents/envs/rpc_communicator.py
  5. 6
      ml-agents-envs/mlagents/envs/socket_communicator.py
  6. 8
      ml-agents/mlagents/trainers/barracuda.py
  7. 10
      ml-agents/mlagents/trainers/buffer.py
  8. 2
      ml-agents/mlagents/trainers/learn.py
  9. 4
      ml-agents/mlagents/trainers/ppo/models.py
  10. 2
      ml-agents/mlagents/trainers/ppo/trainer.py
  11. 16
      ml-agents/mlagents/trainers/tensorflow_to_barracuda.py
  12. 4
      ml-agents/mlagents/trainers/trainer.py

5
.pre-commit-config.yaml


name: mypy-ml-agents-envs
files: "ml-agents-envs/.*"
# Exclude protobuf files and don't follow them when imported
exclude: ".*_pb2.py"
# TODO get disallow-incomplete-defs working
args: [--ignore-missing-imports, --follow-imports=silent]
- id: mypy

.*cs.meta|
.*.css
)$
args: [--fix=lf]
args: [--fix=lf]
- id: flake8
exclude: ".*_pb2.py"

12
ml-agents-envs/mlagents/envs/exception.py


with open(log_file_path, "r") as f:
printing = False
unity_error = "\n"
for l in f:
l = l.strip()
if (l == "Exception") or (l == "Error"):
for line in f:
line = line.strip()
if (line == "Exception") or (line == "Error"):
if l == "":
if line == "":
unity_error += l + "\n"
unity_error += line + "\n"
logger.info(unity_error)
logger.error(
"An error might have occured in the environment. "

)
except:
except Exception:
logger.error(
"An error might have occured in the environment. "
"No UnitySDK.log file could be found."

2
ml-agents-envs/mlagents/envs/mock_communicator.py


try:
fake_brain = inputs.rl_input.agent_actions["RealFakeBrain"]
global_done = fake_brain.value[0].vector_actions[0] == -1
except:
except Exception:
pass
result = UnityRLOutput(global_done=global_done, agentInfos=dict_agent_info)
return UnityOutput(rl_output=result)

2
ml-agents-envs/mlagents/envs/rpc_communicator.py


self.server.add_insecure_port("[::]:" + str(self.port))
self.server.start()
self.is_open = True
except:
except Exception:
raise UnityWorkerInUseException(self.worker_id)
def check_port(self, port):

6
ml-agents-envs/mlagents/envs/socket_communicator.py


self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._socket.bind(("localhost", self.port))
except:
except Exception:
raise UnityTimeOutException(
"Couldn't start socket communication because worker number {} is still in use. "
"You may need to manually close a previously opened environment "

self._socket.listen(1)
self._conn, _ = self._socket.accept()
self._conn.settimeout(30)
except:
except Exception:
raise UnityTimeOutException(
"The Unity environment took too long to respond. Make sure that :\n"
"\t The environment does not need user interaction to launch\n"

s = s[4:]
while len(s) != message_length:
s += self._conn.recv(self._buffer_size)
except socket.timeout as e:
except socket.timeout:
raise UnityTimeOutException("The environment took too long to respond.")
return s

8
ml-agents/mlagents/trainers/barracuda.py


# Recur for all the vertices adjacent to this vertex
for i in self.graph[v]:
if visited[i] == False:
if not visited[i]:
self.topologicalSortUtil(i, visited, stack)
# Push current vertex to stack which stores result

# Call the recursive helper function to store Topological
# Sort starting from all vertices one by one
for i in range(self.V):
if visited[i] == False:
if not visited[i]:
self.topologicalSortUtil(i, visited, stack)
# print(stack)

def write_shape(self, s):
self.write_int32(len(s))
for el in s:
self.write_int32(el if el != None else -1)
self.write_int32(el if el is not None else -1)
def close(self):
self.f.close()

w.write_int32(len(model.layers))
for l in model.layers:
assert not l.name in l.inputs
assert l.name not in l.inputs
w.write_str(l.name)
w.write_int32(l.type)

10
ml-agents/mlagents/trainers/buffer.py


def append(self, element, padding_value=0):
"""
Adds an element to this list. Also lets you change the padding
Adds an element to this list. Also lets you change the padding
be padded with 1.)
be padded with 1.)
:param element: The element to append to the list.
:param padding_value: The value used to pad when get_batch is called.
"""

"""
if len(key_list) < 2:
return True
l = None
length = None
if (l is not None) and (l != len(self[key])):
if (length is not None) and (length != len(self[key])):
l = len(self[key])
length = len(self[key])
return True
def shuffle(self, key_list=None):

2
ml-agents/mlagents/trainers/learn.py


"""
)
except:
except Exception:
print("\n\n\tUnity Technologies\n")
_USAGE = """

4
ml-agents/mlagents/trainers/ppo/models.py


:param use_recurrent: Whether to use an LSTM layer in the network.
:param num_layers Number of hidden layers between encoded input and policy & value layers
:param m_size: Size of brain memory.
:param seed: Seed to use for initialization of model.
:param stream_names: List of names of value streams. Usually, a list of the Reward Signals being used.
:param seed: Seed to use for initialization of model.
:param stream_names: List of names of value streams. Usually, a list of the Reward Signals being used.
:return: a sub-class of PPOAgent tailored to the environment.
"""
LearningModel.__init__(

2
ml-agents/mlagents/trainers/ppo/trainer.py


n_sequences = max(
int(self.trainer_parameters["batch_size"] / self.policy.sequence_length), 1
)
value_total, policy_total, forward_total, inverse_total = [], [], [], []
value_total, policy_total = [], []
advantages = self.training_buffer.update_buffer["advantages"].get_batch()
self.training_buffer.update_buffer["advantages"].set(
(advantages - advantages.mean()) / (advantages.std() + 1e-10)

16
ml-agents/mlagents/trainers/tensorflow_to_barracuda.py


else:
activation = "Linear"
if not class_name in known_classes:
if class_name not in known_classes:
if class_name in requires_runtime_flag:
print("SKIP:", class_name, "layer is used only for training")
else:

auto_pad = get_attr(layer, "padding") # layer.attr['padding'].s.decode("utf-8")
pads = get_attr(layer, "pads")
strides = get_attr(layer, "strides") # layer.attr['strides'].list.i
dilations = get_attr(layer, "dilations") # layer.attr['dilations'].list.i
# TODO remove?
# dilations = get_attr(layer, "dilations") # layer.attr['dilations'].list.i
pool_size = get_attr(layer, "ksize") # layer.attr['ksize'].list.i
shape = get_attr(layer, "shape")
starts = get_attr(layer, "starts")

alpha = get_attr(layer, "alpha", default=1)
beta = get_attr(layer, "beta")
if activation and not activation in known_activations:
if activation and activation not in known_activations:
if auto_pad and not auto_pad in known_paddings:
if auto_pad and auto_pad not in known_paddings:
if data_frmt and not data_frmt in supported_data_formats:
if data_frmt and data_frmt not in supported_data_formats:
print("UNSUPPORTED: data format", data_frmt)
o_l.activation = known_activations.get(activation) or 0

-1 not in input_ranks
) # for rank() lambda all input ranks have to be known (not -1)
rank = rank(input_ranks)
if rank == None:
if rank is None:
def all_elements_equal(arr): # http://stackoverflow.com/q/3844948/
return arr.count(arr[0]) == len(arr)

o_model.layers = cleanup_layers(o_model.layers)
all_inputs = {i for l in o_model.layers for i in l.inputs}
embedded_tensors = {t.name for l in o_model.layers for t in l.tensors}
# TODO remove?
# embedded_tensors = {t.name for l in o_model.layers for t in l.tensors}
# Trim
if trim_unused_by_output:

4
ml-agents/mlagents/trainers/trainer.py


"""
Takes a parameter dictionary and converts it to a human-readable string.
Recurses if there are multiple levels of dict. Used to print out hyperaparameters.
param: param_dict: A Dictionary of key, value parameters.
param: param_dict: A Dictionary of key, value parameters.
return: A string version of this dictionary.
"""
if not isinstance(param_dict, dict):

)
s = sess.run(s_op)
self.summary_writer.add_summary(s, self.get_step)
except:
except Exception:
LOGGER.info(
"Cannot write text summary for Tensorboard. Tensorflow version must be r1.2 or above."
)
正在加载...
取消
保存