             else 0
         )
-        self.visual_processors, self.vector_processors, encoder_input_size = ModelUtils.create_input_processors(
+        self.processors, encoder_input_size = ModelUtils.create_input_processors(
             observation_shapes,
             self.h_size,
             network_settings.vis_encode_type,
         else:
             self.lstm = None  # type: ignore
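
# Illustrative sketch only, not ModelUtils.create_input_processors: a
# consolidated processor list can be built by choosing one encoder per
# observation shape and summing the per-encoder output widths. The helper
# names make_flat_encoder / make_visual_encoder / create_processors below are
# hypothetical.
from typing import List, Tuple

from torch import nn


def make_flat_encoder(input_size: int, hidden_size: int) -> nn.Module:
    # Vector observations: a small MLP.
    return nn.Sequential(nn.Linear(input_size, hidden_size), nn.ReLU())


def make_visual_encoder(shape: Tuple[int, int, int], hidden_size: int) -> nn.Module:
    # Visual observations (H, W, C): a tiny conv stack plus a projection.
    return nn.Sequential(
        nn.Conv2d(shape[-1], 16, kernel_size=3, stride=2),
        nn.ReLU(),
        nn.Flatten(),
        nn.LazyLinear(hidden_size),
        nn.ReLU(),
    )


def create_processors(
    observation_shapes: List[tuple], hidden_size: int
) -> Tuple[nn.ModuleList, int]:
    processors: List[nn.Module] = []
    total_size = 0
    for shape in observation_shapes:
        if len(shape) == 1:
            processors.append(make_flat_encoder(shape[0], hidden_size))
        else:
            processors.append(make_visual_encoder(shape, hidden_size))
        total_size += hidden_size
    return nn.ModuleList(processors), total_size


# e.g. one 8-dim vector observation plus one 84x84 RGB camera:
processors, encoder_input_size = create_processors([(8,), (84, 84, 3)], hidden_size=128)
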
-    def update_normalization(self, vec_inputs: List[torch.Tensor]) -> None:
-        for vec_input, vec_enc in zip(vec_inputs, self.vector_processors):
-            vec_enc.update_normalization(vec_input)
+    def update_normalization(self, net_inputs: List[torch.Tensor]) -> None:
+        for _in, enc in zip(net_inputs, self.processors):
+            enc.update_normalization(_in)
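
# Sketch of the update pattern above, assuming each processor owns a running
# mean/variance normalizer that it updates from incoming batches. This
# Normalizer class is illustrative, not the ml-agents implementation.
import torch
from torch import nn


class Normalizer(nn.Module):
    def __init__(self, size: int):
        super().__init__()
        self.register_buffer("steps", torch.zeros(1))
        self.register_buffer("mean", torch.zeros(size))
        self.register_buffer("var", torch.ones(size))

    def update_normalization(self, batch: torch.Tensor) -> None:
        # Pooled mean/variance update over the batch dimension.
        batch_mean = batch.mean(dim=0)
        batch_var = batch.var(dim=0, unbiased=False)
        n = batch.shape[0]
        total = self.steps + n
        new_mean = self.mean + (batch_mean - self.mean) * n / total
        self.var = (
            self.steps * (self.var + (self.mean - new_mean) ** 2)
            + n * (batch_var + (batch_mean - new_mean) ** 2)
        ) / total
        self.mean = new_mean
        self.steps = total

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return (x - self.mean) / torch.sqrt(self.var + 1e-8)


# Usage mirrors the refactored loop: zip the per-observation inputs with the
# per-observation processors and let each one update its own statistics.
normalizers = [Normalizer(8), Normalizer(4)]
inputs = [torch.randn(32, 8), torch.randn(32, 4)]
for _in, enc in zip(inputs, normalizers):
    enc.update_normalization(_in)
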
     def copy_normalization(self, other_network: "NetworkBody") -> None:
         if self.normalize:
     def forward(
         self,
-        vec_inputs: List[torch.Tensor],
-        vis_inputs: List[torch.Tensor],
+        inputs: List[torch.Tensor],

-        for idx, processor in enumerate(self.vector_processors):
-            vec_input = vec_inputs[idx]
-            processed_vec = processor(vec_input)
+        for idx, processor in enumerate(self.processors):
+            net_input = inputs[idx]
+            if not exporting_to_onnx.is_exporting() and len(net_input.shape) > 3:
+                net_input = net_input.permute([0, 3, 1, 2])
+            processed_vec = processor(net_input)

-        for idx, processor in enumerate(self.visual_processors):
-            vis_input = vis_inputs[idx]
-            if not exporting_to_onnx.is_exporting():
-                vis_input = vis_input.permute([0, 3, 1, 2])
-            processed_vis = processor(vis_input)
-            encodes.append(processed_vis)

         if len(encodes) == 0:
             raise Exception("No valid inputs to network.")
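
# Minimal sketch of the layout handling in the unified loop: visual
# observations arrive as (batch, H, W, C) and are permuted to (batch, C, H, W)
# before a Conv2d encoder, while 2-D vector batches pass through untouched.
# The local `is_exporting` flag stands in for exporting_to_onnx.is_exporting().
import torch

is_exporting = False


def to_encoder_layout(net_input: torch.Tensor) -> torch.Tensor:
    if not is_exporting and len(net_input.shape) > 3:
        # NHWC -> NCHW, the layout PyTorch conv layers expect.
        net_input = net_input.permute([0, 3, 1, 2])
    return net_input


vec_batch = torch.randn(32, 8)           # stays (32, 8)
vis_batch = torch.randn(32, 84, 84, 3)   # becomes (32, 3, 84, 84)
print(to_encoder_layout(vec_batch).shape, to_encoder_layout(vis_batch).shape)
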
     def forward(
         self,
-        vec_inputs: List[torch.Tensor],
-        vis_inputs: List[torch.Tensor],
+        net_inputs: List[torch.Tensor],

-            vec_inputs, vis_inputs, actions, memories, sequence_length
+            net_inputs, actions, memories, sequence_length
         )
         output = self.value_heads(encoding)
         return output, memories
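
# Sketch of what a value_heads module typically looks like here: one linear
# head per reward stream, applied to the shared encoding. The stream names and
# sizes are made up for illustration; this is not the ml-agents ValueHeads.
from typing import Dict

import torch
from torch import nn


class ValueHeads(nn.Module):
    def __init__(self, stream_names, encoding_size: int):
        super().__init__()
        self.heads = nn.ModuleDict(
            {name: nn.Linear(encoding_size, 1) for name in stream_names}
        )

    def forward(self, encoding: torch.Tensor) -> Dict[str, torch.Tensor]:
        return {name: head(encoding).squeeze(-1) for name, head in self.heads.items()}


value_heads = ValueHeads(["extrinsic", "curiosity"], encoding_size=128)
output = value_heads(torch.randn(32, 128))  # {"extrinsic": (32,), "curiosity": (32,)}
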
     def memory_size(self) -> int:
         return self.network_body.memory_size
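
# Sketch of a memory_size property for an LSTM-backed body, assuming the
# exported memory is the hidden and cell state concatenated (so twice the
# hidden width) and 0 when no LSTM is used. RecurrentBody is a hypothetical
# wrapper, not the ml-agents LSTM class.
from torch import nn


class RecurrentBody(nn.Module):
    def __init__(self, input_size: int, hidden_size: int, use_lstm: bool):
        super().__init__()
        self.hidden_size = hidden_size
        self.lstm = nn.LSTM(input_size, hidden_size, batch_first=True) if use_lstm else None

    @property
    def memory_size(self) -> int:
        return 2 * self.hidden_size if self.lstm is not None else 0


print(RecurrentBody(128, 64, use_lstm=True).memory_size)   # 128
print(RecurrentBody(128, 64, use_lstm=False).memory_size)  # 0
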
-    def update_normalization(self, vector_obs: List[torch.Tensor]) -> None:
-        self.network_body.update_normalization(vector_obs)
+    def update_normalization(self, obs: List[torch.Tensor]) -> None:
+        self.network_body.update_normalization(obs)
     def sample_action(self, dists: List[DistInstance]) -> List[torch.Tensor]:
         actions = []
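
# Sketch of sampling one action per distribution, as sample_action does for a
# list of per-branch distributions; torch.distributions.Categorical stands in
# for the DistInstance wrappers used above.
from typing import List

import torch
from torch.distributions import Categorical


def sample_action(dists: List[Categorical]) -> List[torch.Tensor]:
    actions = []
    for action_dist in dists:
        actions.append(action_dist.sample())
    return actions


dists = [Categorical(logits=torch.randn(32, 3)), Categorical(logits=torch.randn(32, 5))]
action_list = sample_action(dists)             # one (32,) tensor per branch
action_out = torch.stack(action_list, dim=-1)  # (32, 2), as in forward() below
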
     def get_dists(
         self,
-        vec_inputs: List[torch.Tensor],
-        vis_inputs: List[torch.Tensor],
+        net_inputs: List[torch.Tensor],

-            vec_inputs, vis_inputs, memories=memories, sequence_length=sequence_length
+            net_inputs, memories=memories, sequence_length=sequence_length
         )
         if self.action_spec.is_continuous():
             dists = self.distribution(encoding)
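
# Sketch of a continuous-action distribution head like the one invoked by
# self.distribution(encoding): a linear layer producing means plus a learned
# log-std, returned as a Normal distribution. GaussianHead is illustrative,
# not the ml-agents GaussianDistribution.
import torch
from torch import nn
from torch.distributions import Normal


class GaussianHead(nn.Module):
    def __init__(self, encoding_size: int, act_size: int):
        super().__init__()
        self.mu = nn.Linear(encoding_size, act_size)
        self.log_sigma = nn.Parameter(torch.zeros(act_size))

    def forward(self, encoding: torch.Tensor) -> Normal:
        return Normal(self.mu(encoding), self.log_sigma.exp())


head = GaussianHead(encoding_size=128, act_size=2)
dist = head(torch.randn(32, 128))
action = dist.sample()            # (32, 2)
log_prob = dist.log_prob(action)  # (32, 2)
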
     def forward(
         self,
-        vec_inputs: List[torch.Tensor],
-        vis_inputs: List[torch.Tensor],
+        net_inputs: List[torch.Tensor],
         masks: Optional[torch.Tensor] = None,
         memories: Optional[torch.Tensor] = None,
     ) -> Tuple[torch.Tensor, int, int, int, int]:

-        dists, _ = self.get_dists(vec_inputs, vis_inputs, masks, memories, 1)
+        dists, _ = self.get_dists(net_inputs, masks, memories, 1)
         if self.action_spec.is_continuous():
             action_list = self.sample_action(dists)
             action_out = torch.stack(action_list, dim=-1)
     def critic_pass(
         self,
-        vec_inputs: List[torch.Tensor],
-        vis_inputs: List[torch.Tensor],
+        net_inputs: List[torch.Tensor],

-            vec_inputs, vis_inputs, memories=memories, sequence_length=sequence_length
+            net_inputs, memories=memories, sequence_length=sequence_length

-        vec_inputs: List[torch.Tensor],
-        vis_inputs: List[torch.Tensor],
+        net_inputs: List[torch.Tensor],

-            vec_inputs, vis_inputs, memories=memories, sequence_length=sequence_length
+            net_inputs, memories=memories, sequence_length=sequence_length
         )
         if self.action_spec.is_continuous():
             dists = self.distribution(encoding)
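
# Sketch of the discrete branch that pairs with the is_continuous() check
# above, assuming action masks disable invalid choices by pushing their logits
# to -inf before the categorical distribution is built. Illustrative only, not
# the ml-agents MultiCategoricalDistribution.
import torch
from torch.distributions import Categorical


def masked_categorical(logits: torch.Tensor, mask: torch.Tensor) -> Categorical:
    # mask: 1 for allowed actions, 0 for disallowed ones.
    blocked = torch.where(mask.bool(), logits, torch.full_like(logits, float("-inf")))
    return Categorical(logits=blocked)


logits = torch.randn(32, 4)
mask = torch.tensor([1, 1, 0, 1]).expand(32, 4)
dist = masked_categorical(logits, mask)
assert torch.all(dist.sample() != 2)  # action 2 is masked out and never sampled
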
     def get_dist_and_value(
         self,
-        vec_inputs: List[torch.Tensor],
-        vis_inputs: List[torch.Tensor],
+        net_inputs: List[torch.Tensor],
         masks: Optional[torch.Tensor] = None,
         memories: Optional[torch.Tensor] = None,
         sequence_length: int = 1,

             critic_mem = None
             actor_mem = None
         dists, actor_mem_outs = self.get_dists(
-            vec_inputs,
-            vis_inputs,
+            net_inputs,

-            vec_inputs, vis_inputs, memories=critic_mem, sequence_length=sequence_length
+            net_inputs, memories=critic_mem, sequence_length=sequence_length
         )
         if self.use_lstm:
             mem_out = torch.cat([actor_mem_outs, critic_mem_outs], dim=-1)
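
# Sketch of the memory bookkeeping in get_dist_and_value when actor and critic
# have separate LSTMs: the incoming memory vector is split in half, each half
# feeds one network, and the outgoing halves are concatenated back together.
# Shapes and the actor-first ordering are assumptions for illustration.
import torch

memory_size = 256
memories = torch.zeros(1, 32, memory_size)

actor_mem, critic_mem = torch.split(memories, memory_size // 2, dim=-1)

# ... run actor and critic, each returning its updated memory half ...
actor_mem_outs, critic_mem_outs = actor_mem, critic_mem  # placeholders

mem_out = torch.cat([actor_mem_outs, critic_mem_outs], dim=-1)
assert mem_out.shape == memories.shape
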