
add defaults to linear encoder, initialize ent encoders

Branch: MLA-1734-demo-provider
Andrew Cohen, 4 years ago
Commit: 540b930b
2 files changed, 19 insertions(+), 6 deletions(-)
  1. ml-agents/mlagents/trainers/torch/attention.py (8 changes)
  2. ml-agents/mlagents/trainers/torch/layers.py (17 changes)

ml-agents/mlagents/trainers/torch/attention.py

         self.self_size = 0
         self.ent_encoders = torch.nn.ModuleList(
             [
-                LinearEncoder(self.self_size + ent_size, 1, embedding_size)
+                LinearEncoder(
+                    self.self_size + ent_size,
+                    1,
+                    embedding_size,
+                    kernel_init=Initialization.Normal,
+                    kernel_gain=(0.125 / embedding_size) ** 0.5,
+                )
                 for ent_size in self.entity_sizes
             ]
         )
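The new kernel_gain of (0.125 / embedding_size) ** 0.5 shrinks the entity encoders' initial weights so their embeddings start small relative to the embedding width. A minimal sketch of the effect in plain PyTorch, assuming kernel_gain acts as a multiplier on normally initialized weights; make_scaled_linear is a hypothetical stand-in for ML-Agents' linear_layer helper:

import torch

def make_scaled_linear(in_size: int, out_size: int, gain: float) -> torch.nn.Linear:
    # Assumed behavior of kernel_init=Initialization.Normal plus kernel_gain:
    # standard-normal weights scaled by the gain, zero bias.
    layer = torch.nn.Linear(in_size, out_size)
    layer.weight.data.normal_()
    layer.weight.data *= gain
    layer.bias.data.zero_()
    return layer

embedding_size = 64  # illustrative value
layer = make_scaled_linear(128, embedding_size, (0.125 / embedding_size) ** 0.5)
print(layer.weight.std())  # roughly (0.125 / 64) ** 0.5 ≈ 0.044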

ml-agents/mlagents/trainers/torch/layers.py

     Linear layers.
     """

-    def __init__(self, input_size: int, num_layers: int, hidden_size: int):
+    def __init__(
+        self,
+        input_size: int,
+        num_layers: int,
+        hidden_size: int,
+        kernel_init: Initialization = Initialization.KaimingHeNormal,
+        kernel_gain: float = 1.0,
+    ):
         super().__init__()
         self.layers = [
             linear_layer(
                 input_size,
                 hidden_size,
-                kernel_init=Initialization.KaimingHeNormal,
-                kernel_gain=1.0,
+                kernel_init=kernel_init,
+                kernel_gain=kernel_gain,
             )
         ]
         self.layers.append(Swish())
         for _ in range(num_layers - 1):
             self.layers.append(
                 linear_layer(
                     hidden_size,
                     hidden_size,
-                    kernel_init=Initialization.KaimingHeNormal,
-                    kernel_gain=1.0,
+                    kernel_init=kernel_init,
+                    kernel_gain=kernel_gain,
                 )
             )
             self.layers.append(Swish())
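Because both new parameters default to the previously hard-coded values, existing LinearEncoder call sites behave exactly as before, while attention.py can opt into the smaller Normal initialization. A usage sketch, assuming LinearEncoder and the Initialization enum are both importable from mlagents.trainers.torch.layers:

from mlagents.trainers.torch.layers import Initialization, LinearEncoder

# Old-style call: the defaults (KaimingHeNormal, gain 1.0) reproduce the
# pre-commit behavior unchanged.
encoder = LinearEncoder(input_size=32, num_layers=2, hidden_size=64)

# New-style call, mirroring the entity encoders in attention.py:
embedding_size = 64  # illustrative value
ent_encoder = LinearEncoder(
    32,
    1,
    embedding_size,
    kernel_init=Initialization.Normal,
    kernel_gain=(0.125 / embedding_size) ** 0.5,
)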
