for idx, encoder in enumerate(self.visual_encoders):
    vis_input = vis_inputs[idx]
    if not torch.onnx.is_in_onnx_export():
        # Visual observations arrive as NHWC; the conv encoders expect NCHW.
        # The permute is skipped while tracing for ONNX export, so the
        # exported graph takes channel-first input directly.
        vis_input = vis_input.permute([0, 3, 1, 2])
    hidden = encoder(vis_input)
    encodes.append(hidden)
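# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original file: a minimal module showing
# the effect of the torch.onnx.is_in_onnx_export() guard used above. Called
# eagerly, it permutes NHWC observations to NCHW before the convolution;
# while torch.onnx.export() traces it, the permute is skipped and the exported
# graph expects channel-first input directly. ToyVisualEncoder and its shapes
# are hypothetical stand-ins, not the project's encoders.
# ---------------------------------------------------------------------------
import torch
from torch import nn

class ToyVisualEncoder(nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.conv = nn.Conv2d(3, 8, kernel_size=3)  # expects NCHW input

    def forward(self, vis_input: torch.Tensor) -> torch.Tensor:
        if not torch.onnx.is_in_onnx_export():
            vis_input = vis_input.permute([0, 3, 1, 2])  # NHWC -> NCHW
        return self.conv(vis_input)

# Eager call with NHWC input (permuted internally):
#     ToyVisualEncoder()(torch.rand(1, 36, 36, 3))
# Export with NCHW input (permute skipped during tracing):
#     torch.onnx.export(ToyVisualEncoder(), torch.rand(1, 3, 36, 36), "toy.onnx")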
def forward(
    self,
    vec_inputs: List[torch.Tensor],
    vis_inputs: List[torch.Tensor],
    masks: Optional[torch.Tensor] = None,
    memories: Optional[torch.Tensor] = None,
    sequence_length: int = 1,
) -> Tuple[torch.Tensor, torch.Tensor, int, int, int, int]:
    """
    Forward pass of the Actor for inference. This is required for export to ONNX, and
    the inputs and outputs of this method should not be changed without a respective change
    in the ONNX export code.
    """
    dists, _ = self.get_dists(
        vec_inputs, vis_inputs, masks, memories, sequence_length
    )
    # Sample one action per distribution and stack them into a single tensor.
    action_list = self.sample_action(dists)
    sampled_actions = torch.stack(action_list, dim=-1)
    if self.act_type == ActionType.CONTINUOUS:
        # Continuous control: report the log-probability of the sampled action.
        log_probs = dists[0].log_prob(sampled_actions)
        action_out = sampled_actions
    else:
        # Discrete control: report the log-probabilities of all actions.
        log_probs = dists[0].all_log_prob()
        action_out = dists[0].all_log_prob()
    return (
        sampled_actions,
        log_probs,
        action_out,
        self.version_number,
        torch.Tensor([self.network_body.memory_size]),
        self.is_continuous_int,
    )
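# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original file: how an Actor with the
# forward() signature above could be exported to ONNX. The `actor` instance,
# the dummy shapes (VEC_OBS_SIZE, 84x84x3 visuals, NUM_ACTION_BRANCHES) and
# the opset choice are assumptions, not the project's actual export code.
# ---------------------------------------------------------------------------
# dummy_vec_obs = [torch.zeros(1, VEC_OBS_SIZE)]
# dummy_vis_obs = [torch.zeros(1, 84, 84, 3)]  # NHWC, matching forward()
# dummy_masks = torch.ones(1, NUM_ACTION_BRANCHES)
# dummy_memories = torch.zeros(1, 1, actor.network_body.memory_size)
# torch.onnx.export(
#     actor,
#     (dummy_vec_obs, dummy_vis_obs, dummy_masks, dummy_memories),
#     "actor.onnx",
#     opset_version=9,
# )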