
Merge branch 'develop-pytorch-encoding' into develop-restructure-torch-networks

Branch: develop/restructure-torch-networks
GitHub committed 4 years ago
Current commit: 9364048e
3 changed files, with 66 additions and 38 deletions
  1. ml-agents/mlagents/trainers/torch/encoders.py (57 changes)
  2. ml-agents/mlagents/trainers/torch/layers.py (31 changes)
  3. ml-agents/mlagents/trainers/torch/networks.py (16 changes)

ml-agents/mlagents/trainers/torch/encoders.py (57 changes)


 from mlagents.trainers.torch.layers import linear_layer, Initialization, Swish
 import torch
 from torch import nn

     def __init__(self, input_size: int, normalize: bool = False):
         super().__init__()
         self.normalizer: Optional[Normalizer] = None
         if normalize:
             self.normalizer = Normalizer(input_size)

-        return self.seq_layers(inputs)
+        return inputs

     def copy_normalization(self, other_input: "VectorInput") -> None:
         if self.normalizer is not None and other_input.normalizer is not None:

     def update_normalization(self, inputs: torch.Tensor) -> None:
         if self.normalizer is not None:
             self.normalizer.update(inputs)
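The VectorInput hunk above relies on a Normalizer class that this diff does not show. As a reference for how update() and forward() fit together, here is a minimal sketch of a running mean/variance normalizer; it illustrates the general technique and is not ml-agents' exact implementation (names and the epsilon are assumptions):

import torch
from torch import nn


class Normalizer(nn.Module):
    """Running mean/variance normalizer (illustrative sketch)."""

    def __init__(self, vec_obs_size: int):
        super().__init__()
        # Buffers are saved with the model but receive no gradients.
        self.register_buffer("steps", torch.tensor(1.0))
        self.register_buffer("running_mean", torch.zeros(vec_obs_size))
        self.register_buffer("running_sum_sq", torch.ones(vec_obs_size))

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        variance = self.running_sum_sq / self.steps
        return (inputs - self.running_mean) / torch.sqrt(variance + 1e-8)

    def update(self, inputs: torch.Tensor) -> None:
        # Welford-style batch update of the running statistics.
        batch_size = inputs.shape[0]
        total_steps = self.steps + batch_size
        delta_old = inputs - self.running_mean
        new_mean = self.running_mean + delta_old.sum(0) / total_steps
        delta_new = inputs - new_mean
        self.running_sum_sq += (delta_old * delta_new).sum(0)
        self.running_mean = new_mean
        self.steps = total_steps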
 class SimpleVisualEncoder(nn.Module):

             nn.Conv2d(16, 32, [4, 4], [2, 2]),
             nn.LeakyReLU(),
         )
         self.dense = nn.Sequential(
             linear_layer(
                 self.final_flat,
                 self.h_size,
                 kernel_init=Initialization.KaimingHeNormal,
                 kernel_gain=1.0,
             ),
             nn.LeakyReLU(),
         )

     def output_size(self) -> int:
         return self.final_flat

-    def forward(self, visual_obs: torch.Tensor) -> None:
+    def forward(self, visual_obs: torch.Tensor) -> torch.Tensor:
         hidden = self.dense(hidden)
-    def __init__(self, height, width, initial_channels, output_size):
+    def __init__(
+        self, height: int, width: int, initial_channels: int, output_size: int
+    ):
         super().__init__()
         self.h_size = output_size
         conv_1_hw = conv_output_shape((height, width), 8, 4)

             nn.Conv2d(64, 64, [3, 3], [1, 1]),
             nn.LeakyReLU(),
         )
         self.dense = nn.Sequential(
             linear_layer(
                 self.final_flat,
                 self.h_size,
                 kernel_init=Initialization.KaimingHeNormal,
                 kernel_gain=1.0,
             ),
             nn.LeakyReLU(),
         )

     def output_size(self) -> int:
         return self.final_flat

-    def forward(self, visual_obs: torch.Tensor) -> None:
+    def forward(self, visual_obs: torch.Tensor) -> torch.Tensor:
         hidden = self.dense(hidden)
         return hidden

 class ResNetVisualEncoder(nn.Module):
-    def __init__(self, height, width, initial_channels, final_hidden):
+    def __init__(self, height: int, width: int, initial_channels: int):
         super().__init__()
         n_channels = [16, 32, 32]  # channel for each stack
         n_blocks = 2  # number of residual blocks

             layers.append(ResNetBlock(channel))
             last_channel = channel
         layers.append(Swish())
-        self.dense = linear_layer(
-            n_channels[-1] * height * width,
-            final_hidden,
-            kernel_init=Initialization.KaimingHeNormal,
-            kernel_gain=1.0,
-        )
+        self._output_size = n_channels[-1] * height * width

-    def forward(self, visual_obs):
+    def output_size(self) -> int:
+        return self._output_size
+
+    def forward(self, visual_obs: torch.Tensor) -> torch.Tensor:

-        return torch.relu(self.dense(before_out))
+        return torch.relu(before_out)
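Both fixed-size encoders compute final_flat via conv_output_shape (see conv_1_hw above), a helper this diff does not include. Below is a minimal sketch of the standard convolution output-shape formula such a helper implements; the signature is assumed for illustration, not taken from the repo:

from math import floor
from typing import Tuple


def conv_output_shape(
    h_w: Tuple[int, int], kernel_size: int = 1, stride: int = 1, padding: int = 0
) -> Tuple[int, int]:
    # Standard formula: out = floor((in + 2*pad - kernel) / stride) + 1
    h = floor((h_w[0] + 2 * padding - kernel_size) / stride) + 1
    w = floor((h_w[1] + 2 * padding - kernel_size) / stride) + 1
    return h, w


# e.g. an 84x84 observation through the first 8x8/stride-4 conv above:
assert conv_output_shape((84, 84), 8, 4) == (20, 20)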

ml-agents/mlagents/trainers/torch/layers.py (31 changes)


     pass


class LinearEncoder(torch.nn.Module):
    """
    Linear layers.
    """

    def __init__(self, input_size: int, num_layers: int, hidden_size: int):
        super().__init__()
        self.layers = [
            linear_layer(
                input_size,
                hidden_size,
                kernel_init=Initialization.KaimingHeNormal,
                kernel_gain=1.0,
            )
        ]
        self.layers.append(Swish())
        for _ in range(num_layers - 1):
            self.layers.append(
                linear_layer(
                    hidden_size,
                    hidden_size,
                    kernel_init=Initialization.KaimingHeNormal,
                    kernel_gain=1.0,
                )
            )
            self.layers.append(Swish())
        self.seq_layers = torch.nn.Sequential(*self.layers)

    def forward(self, input_tensor: torch.Tensor) -> torch.Tensor:
        return self.seq_layers(input_tensor)


class LSTM(MemoryModule):
    """
    Memory module that implements LSTM.
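The new LinearEncoder is self-contained, so a quick usage sketch is easy; the sizes here are arbitrary, chosen only for illustration:

import torch
from mlagents.trainers.torch.layers import LinearEncoder

# 38 input features -> two 128-unit Swish-activated hidden layers.
encoder = LinearEncoder(input_size=38, num_layers=2, hidden_size=128)
batch = torch.randn(64, 38)   # (batch, input_size)
hidden = encoder(batch)       # (batch, hidden_size)
assert hidden.shape == (64, 128)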

ml-agents/mlagents/trainers/torch/networks.py (16 changes)


 from mlagents.trainers.settings import NetworkSettings
 from mlagents.trainers.torch.utils import ModelUtils
 from mlagents.trainers.torch.decoders import ValueHeads
-from mlagents.trainers.torch.layers import LSTM
+from mlagents.trainers.torch.layers import LSTM, LinearEncoder

 ActivationFunction = Callable[[torch.Tensor], torch.Tensor]
 EncoderFunction = Callable[

             self.h_size,
             network_settings.vis_encode_type,
             normalize=self.normalize,
         )
+        input_size = sum(
+            _input.size for _input in self.visual_inputs + self.vector_inputs
+        )
+        self.linear_encoder = LinearEncoder(
+            input_size, network_settings.num_layers, self.h_size
+        )
         if self.use_lstm:

         if len(encodes) == 0:
             raise Exception("No valid inputs to network.")

         # Constants don't work in Barracuda
-            encoding = torch.cat(encodes + [actions], dim=-1)
+            input = torch.cat(encodes + [actions], dim=-1)
-            encoding = torch.cat(encodes, dim=-1)
+            input = torch.cat(encodes, dim=-1)

         # HIDDEN LAYERS
         if self.use_lstm:
             # Resize to (batch, sequence length, encoding size)
             encoding = encoding.reshape([-1, sequence_length, self.h_size])
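Net effect of the networks.py hunks: the per-observation encodings are concatenated (with actions, when conditioning on them), pushed through the shared LinearEncoder, and only then reshaped for the LSTM. A condensed, illustrative sketch of that flow; the function name and argument list are assumptions, not the repo's NetworkBody.forward:

import torch
from typing import List, Optional


def body_forward_sketch(
    encodes: List[torch.Tensor],
    actions: Optional[torch.Tensor],
    linear_encoder: torch.nn.Module,
    lstm: Optional[torch.nn.Module],
    sequence_length: int,
    h_size: int,
) -> torch.Tensor:
    # Concatenate all encoder outputs, plus actions when provided.
    if actions is not None:
        inputs = torch.cat(encodes + [actions], dim=-1)
    else:
        inputs = torch.cat(encodes, dim=-1)
    encoding = linear_encoder(inputs)  # shared hidden layers, output size h_size
    if lstm is not None:
        # Resize to (batch, sequence length, encoding size) before the memory module.
        encoding = encoding.reshape([-1, sequence_length, h_size])
    return encoding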
