
Got 2 modes on Walker I think

/exp-diverse-behavior
vincentpierre, 4 years ago
Current commit
5985959d
7 files changed, 1252 insertions(+), 57 deletions(-)
  1. Project/Assets/ML-Agents/Examples/Walker/Prefabs/Ragdoll/WalkerRagdoll.prefab (25 changes)
  2. Project/Assets/ML-Agents/Examples/Walker/Scenes/Walker.unity (153 changes)
  3. config/sac/Walker.yaml (2 changes)
  4. ml-agents/mlagents/trainers/sac/optimizer_torch.py (111 changes)
  5. ml-agents/mlagents/trainers/settings.py (3 changes)
  6. Project/Assets/ML-Agents/Examples/Walker/TFModels/mede-walker-crazy-mutual-10000-nogound-penalty.onnx (1001 changes)
  7. Project/Assets/ML-Agents/Examples/Walker/TFModels/mede-walker-crazy-mutual-10000-nogound-penalty.onnx.meta (14 changes)

Project/Assets/ML-Agents/Examples/Walker/Prefabs/Ragdoll/WalkerRagdoll.prefab (25 changes)


m_EditorClassIdentifier:
agent: {fileID: 0}
agentDoneOnGroundContact: 1
penalizeGroundContact: 1
penalizeGroundContact: 0
groundContactPenalty: -1
touchingGround: 0
--- !u!1 &895268871289741235

armR: {fileID: 7933235355057813930}
forearmR: {fileID: 7933235353195701980}
handR: {fileID: 7933235354616748502}
m_DiversitySetting: 0
--- !u!114 &895268871377934303
MonoBehaviour:
m_ObjectHideFlags: 0

m_EditorClassIdentifier:
agent: {fileID: 0}
agentDoneOnGroundContact: 1
penalizeGroundContact: 1
penalizeGroundContact: 0
groundContactPenalty: -1
touchingGround: 0
--- !u!136 &7933235353030744117

m_EditorClassIdentifier:
agent: {fileID: 0}
agentDoneOnGroundContact: 1
penalizeGroundContact: 1
penalizeGroundContact: 0
groundContactPenalty: -1
touchingGround: 0
--- !u!135 &7933235353041637845

m_EditorClassIdentifier:
agent: {fileID: 0}
agentDoneOnGroundContact: 1
penalizeGroundContact: 1
penalizeGroundContact: 0
groundContactPenalty: -1
touchingGround: 0
--- !u!136 &7933235353195701957

m_EditorClassIdentifier:
agent: {fileID: 0}
agentDoneOnGroundContact: 1
penalizeGroundContact: 1
penalizeGroundContact: 0
groundContactPenalty: -1
touchingGround: 0
--- !u!136 &7933235353228551178

m_EditorClassIdentifier:
agent: {fileID: 0}
agentDoneOnGroundContact: 1
penalizeGroundContact: 1
penalizeGroundContact: 0
groundContactPenalty: -1
touchingGround: 0
--- !u!136 &7933235353240438144

m_EditorClassIdentifier:
agent: {fileID: 0}
agentDoneOnGroundContact: 1
penalizeGroundContact: 1
penalizeGroundContact: 0
groundContactPenalty: -1
touchingGround: 0
--- !u!136 &7933235353713167634

m_EditorClassIdentifier:
agent: {fileID: 0}
agentDoneOnGroundContact: 1
penalizeGroundContact: 1
penalizeGroundContact: 0
groundContactPenalty: -1
touchingGround: 0
--- !u!135 &7933235354074184676

m_EditorClassIdentifier:
agent: {fileID: 0}
agentDoneOnGroundContact: 1
penalizeGroundContact: 1
penalizeGroundContact: 0
groundContactPenalty: -1
touchingGround: 0
--- !u!135 &7933235354616748520

m_EditorClassIdentifier:
agent: {fileID: 0}
agentDoneOnGroundContact: 1
penalizeGroundContact: 1
penalizeGroundContact: 0
groundContactPenalty: -1
touchingGround: 0
--- !u!136 &7933235354652902042

m_EditorClassIdentifier:
agent: {fileID: 0}
agentDoneOnGroundContact: 1
penalizeGroundContact: 1
penalizeGroundContact: 0
groundContactPenalty: -1
touchingGround: 0
--- !u!136 &7933235354845945040

m_EditorClassIdentifier:
agent: {fileID: 0}
agentDoneOnGroundContact: 1
penalizeGroundContact: 1
penalizeGroundContact: 0
groundContactPenalty: -1
touchingGround: 0
--- !u!136 &7933235355057813907

Project/Assets/ML-Agents/Examples/Walker/Scenes/Walker.unity (153 changes)


m_ReflectionIntensity: 1
m_CustomReflection: {fileID: 0}
m_Sun: {fileID: 0}
m_IndirectSpecularColor: {r: 0.4497121, g: 0.49977785, b: 0.57563704, a: 1}
m_IndirectSpecularColor: {r: 0.44971168, g: 0.4997775, b: 0.57563686, a: 1}
m_UseRadianceAmbientProbe: 0
--- !u!157 &3
LightmapSettings:

m_Modification:
m_TransformParent: {fileID: 0}
m_Modifications:
- target: {fileID: 443215535079112744, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: m_TargetWalkingSpeed
value: 5
objectReference: {fileID: 0}
- target: {fileID: 443215535079112744, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: randomizeWalkSpeedEachEpisode
value: 0
objectReference: {fileID: 0}
- target: {fileID: 1076680649171575083, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: m_ConnectedAnchor.x

propertyPath: m_Name
value: PlatformDynamicTarget (4)
objectReference: {fileID: 0}
- target: {fileID: 6713178126238440196, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: m_IsActive
value: 1
objectReference: {fileID: 0}
- target: {fileID: 6718791046026642300, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: m_RootOrder

m_Modification:
m_TransformParent: {fileID: 0}
m_Modifications:
- target: {fileID: 443215535079112744, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: m_TargetWalkingSpeed
value: 5
objectReference: {fileID: 0}
- target: {fileID: 443215535079112744, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: randomizeWalkSpeedEachEpisode
value: 0
objectReference: {fileID: 0}
- target: {fileID: 1076680649171575083, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: m_ConnectedAnchor.x

propertyPath: m_Name
value: PlatformDynamicTarget (2)
objectReference: {fileID: 0}
- target: {fileID: 6713178126238440196, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: m_IsActive
value: 1
objectReference: {fileID: 0}
- target: {fileID: 6718791046026642300, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: m_RootOrder

m_Modification:
m_TransformParent: {fileID: 0}
m_Modifications:
- target: {fileID: 443215535079112744, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: m_TargetWalkingSpeed
value: 5
objectReference: {fileID: 0}
- target: {fileID: 443215535079112744, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: randomizeWalkSpeedEachEpisode
value: 0
objectReference: {fileID: 0}
- target: {fileID: 1076680649171575083, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: m_ConnectedAnchor.x

propertyPath: m_Name
value: PlatformDynamicTarget (6)
objectReference: {fileID: 0}
- target: {fileID: 6713178126238440196, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: m_IsActive
value: 1
objectReference: {fileID: 0}
- target: {fileID: 6718791046026642300, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: m_RootOrder

m_Modification:
m_TransformParent: {fileID: 0}
m_Modifications:
- target: {fileID: 443215535079112744, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: m_TargetWalkingSpeed
value: 5
objectReference: {fileID: 0}
- target: {fileID: 443215535079112744, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: randomizeWalkSpeedEachEpisode
value: 0
objectReference: {fileID: 0}
- target: {fileID: 1076680649171575083, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: m_ConnectedAnchor.x

propertyPath: m_Name
value: PlatformDynamicTarget (9)
objectReference: {fileID: 0}
- target: {fileID: 6713178126238440196, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: m_IsActive
value: 1
objectReference: {fileID: 0}
- target: {fileID: 6718791046026642300, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: m_RootOrder

m_Modification:
m_TransformParent: {fileID: 0}
m_Modifications:
- target: {fileID: 443215535079112744, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: m_TargetWalkingSpeed
value: 5
objectReference: {fileID: 0}
- target: {fileID: 443215535079112744, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: randomizeWalkSpeedEachEpisode
value: 0
objectReference: {fileID: 0}
- target: {fileID: 1076680649171575083, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: m_ConnectedAnchor.x

type: 3}
propertyPath: m_Name
value: PlatformDynamicTarget (8)
objectReference: {fileID: 0}
- target: {fileID: 6713178126238440196, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: m_IsActive
value: 1
objectReference: {fileID: 0}
- target: {fileID: 6718791046026642300, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}

m_Modification:
m_TransformParent: {fileID: 0}
m_Modifications:
- target: {fileID: 443215535079112744, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: m_TargetWalkingSpeed
value: 5
objectReference: {fileID: 0}
- target: {fileID: 443215535079112744, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: randomizeWalkSpeedEachEpisode
value: 0
objectReference: {fileID: 0}
- target: {fileID: 1076680649171575083, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: m_ConnectedAnchor.x

propertyPath: m_Name
value: PlatformDynamicTarget (3)
objectReference: {fileID: 0}
- target: {fileID: 6713178126238440196, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: m_IsActive
value: 1
objectReference: {fileID: 0}
- target: {fileID: 6718791046026642300, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: m_RootOrder

m_Modification:
m_TransformParent: {fileID: 0}
m_Modifications:
- target: {fileID: 443215535079112744, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: m_TargetWalkingSpeed
value: 5
objectReference: {fileID: 0}
- target: {fileID: 443215535079112744, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: randomizeWalkSpeedEachEpisode
value: 0
objectReference: {fileID: 0}
objectReference: {fileID: 0}
- target: {fileID: 6713178126238440196, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: m_IsActive
value: 1
objectReference: {fileID: 0}
- target: {fileID: 6718791046026642300, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}

m_Modification:
m_TransformParent: {fileID: 0}
m_Modifications:
- target: {fileID: 443215535079112744, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: m_TargetWalkingSpeed
value: 5
objectReference: {fileID: 0}
- target: {fileID: 443215535079112744, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: randomizeWalkSpeedEachEpisode
value: 0
objectReference: {fileID: 0}
- target: {fileID: 1076680649171575083, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: m_ConnectedAnchor.x

type: 3}
propertyPath: m_Name
value: PlatformDynamicTarget (5)
objectReference: {fileID: 0}
- target: {fileID: 6713178126238440196, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: m_IsActive
value: 1
objectReference: {fileID: 0}
- target: {fileID: 6718791046026642300, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}

m_Modification:
m_TransformParent: {fileID: 0}
m_Modifications:
- target: {fileID: 443215535079112744, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: m_TargetWalkingSpeed
value: 5
objectReference: {fileID: 0}
- target: {fileID: 443215535079112744, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: randomizeWalkSpeedEachEpisode
value: 0
objectReference: {fileID: 0}
- target: {fileID: 1076680649171575083, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: m_ConnectedAnchor.x

propertyPath: m_Name
value: PlatformDynamicTarget (7)
objectReference: {fileID: 0}
- target: {fileID: 6713178126238440196, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: m_IsActive
value: 1
objectReference: {fileID: 0}
- target: {fileID: 6718791046026642300, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: m_RootOrder

m_Modification:
m_TransformParent: {fileID: 0}
m_Modifications:
- target: {fileID: 443215535079112744, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: m_TargetWalkingSpeed
value: 5
objectReference: {fileID: 0}
- target: {fileID: 443215535079112744, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: randomizeWalkSpeedEachEpisode
value: 0
objectReference: {fileID: 0}
- target: {fileID: 6713178126238440196, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: m_Name

propertyPath: m_LocalEulerAnglesHint.z
value: 0
objectReference: {fileID: 0}
- target: {fileID: 7819659174506835736, guid: 84359146bf7af47e58c229d877e801d7,
type: 3}
propertyPath: m_Model
value:
objectReference: {fileID: 5022602860645237092, guid: 835f8eb9071634671bcaced1cea2136d,
type: 3}
m_RemovedComponents: []
m_SourcePrefab: {fileID: 100100000, guid: 84359146bf7af47e58c229d877e801d7, type: 3}

config/sac/Walker.yaml (2 changes)


gamma: 0.995
strength: 0.1
keep_checkpoints: 5
max_steps: 15000000
max_steps: 150000000
time_horizon: 1000
summary_freq: 30000

ml-agents/mlagents/trainers/sac/optimizer_torch.py (111 changes)


from mlagents.trainers.torch.layers import linear_layer, Initialization
STRENGTH = 1.0
mutual_information = 100#0.5
mutual_information = 10000 # 0.5
EPSILON = 1e-7
initial_beta = 0.0

sigma_start = 0.5
print("VARIATIONAL : Settings : strength:", self.STRENGTH, " use_actions:", self._use_actions, " mutual_information : ", self.mutual_information, "Sigma_Start : ", sigma_start)
print(
"VARIATIONAL : Settings : strength:",
self.STRENGTH,
" use_actions:",
self._use_actions,
" mutual_information : ",
self.mutual_information,
"Sigma_Start : ",
sigma_start,
)
# state_encoder_settings = settings
state_encoder_settings = NetworkSettings(normalize=True, num_layers=1)
if state_encoder_settings.memory is not None:

self._encoder = NetworkBody(new_spec, state_encoder_settings)
self._z_sigma = torch.nn.Parameter(
sigma_start * torch.ones((self.z_size), dtype=torch.float), requires_grad=True
)
sigma_start * torch.ones((self.z_size), dtype=torch.float),
requires_grad=True,
)
# self._z_mu_layer = linear_layer(
# state_encoder_settings.hidden_units,
# self.z_size,

torch.tensor(self.initial_beta, dtype=torch.float), requires_grad=False
)
self._last_layer = torch.nn.Linear(
self.z_size, self.diverse_size
)
self._last_layer = torch.nn.Linear(self.z_size, self.diverse_size)
self._diverse_index = -1
self._max_index = len(specs.observation_specs)
for i, spec in enumerate(specs.observation_specs):
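The hunk above stops mid-loop. Presumably the loop scans the observation specs for the goal-signal observation that carries the one-hot behavior-mode label; the sketch below is a reconstruction under that assumption, not the commit's actual code.

```python
from mlagents_envs.base_env import BehaviorSpec, ObservationType

def find_goal_signal(specs: BehaviorSpec):
    """Locate the goal-signal observation and its size; returns (-1, 0) if absent."""
    diverse_index, diverse_size = -1, 0
    for i, spec in enumerate(specs.observation_specs):
        if spec.observation_type == ObservationType.GOAL_SIGNAL:
            diverse_index = i             # index of the one-hot "mode" observation
            diverse_size = spec.shape[0]  # number of behavior modes to discriminate
    return diverse_index, diverse_size
```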

def predict(self, obs_input, action_input, detach_action=False, var_noise=True) -> torch.Tensor:
def predict(
self, obs_input, action_input, detach_action=False, var_noise=True
) -> torch.Tensor:
# Convert to tensors
tensor_obs = [
obs

if self._use_actions:
action = self._action_flattener.forward(action_input).reshape(-1, self._action_flattener.flattened_size)
action = self._action_flattener.forward(action_input).reshape(
-1, self._action_flattener.flattened_size
)
if detach_action:
action = action.detach()
hidden, _ = self._encoder.forward(tensor_obs, action)

# add a VAE (like in VAIL ?)
# z_mu = self._z_mu_layer(hidden)
z_mu = hidden#self._z_mu_layer(hidden)
z_mu = hidden # self._z_mu_layer(hidden)
hidden = torch.normal(z_mu, self._z_sigma * var_noise)
prediction = torch.softmax(self._last_layer(hidden), dim=1)
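The reformatted predict() treats the encoder output as the mean of a Gaussian with the learned, state-independent sigma `self._z_sigma`, samples a latent, and classifies which mode produced the transition. A minimal standalone sketch of that head follows; the function and argument names are illustrative, not the class's API.

```python
import torch

def variational_predict(hidden: torch.Tensor,
                        z_sigma: torch.Tensor,
                        last_layer: torch.nn.Linear,
                        var_noise: bool = True) -> torch.Tensor:
    """Sample z ~ N(hidden, z_sigma) and return softmax mode probabilities."""
    z_mu = hidden  # the diff drops the extra mu layer and uses the encoder output directly
    # reparameterized sampling; equivalent to the diff's torch.normal(z_mu, z_sigma * var_noise)
    z = z_mu + torch.randn_like(z_mu) * z_sigma * float(var_noise)
    return torch.softmax(last_layer(z), dim=1)  # probability of each behavior mode
```

With var_noise=False the sampling collapses to the mean, which is how the SAC value and policy terms later in the file call rewards().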

self._encoder.processors[0].copy_normalization(thing.processors[1])
def rewards(self, obs_input, action_input, detach_action=False, var_noise=True) -> torch.Tensor:
def rewards(
self, obs_input, action_input, detach_action=False, var_noise=True
) -> torch.Tensor:
rewards = torch.log(torch.sum((prediction * truth), dim=1) + self.EPSILON)
rewards = torch.log(
torch.sum((prediction * truth), dim=1) + self.EPSILON
)# - np.log(1 / self.diverse_size) # Center around 0
def loss(self, obs_input, action_input, masks, detach_action=True, var_noise=True) -> torch.Tensor:
def loss(
self, obs_input, action_input, masks, detach_action=True, var_noise=True
) -> torch.Tensor:
base_loss = - ModelUtils.masked_mean(
self.rewards(obs_input, action_input, detach_action, var_noise) , masks
base_loss = -ModelUtils.masked_mean(
self.rewards(obs_input, action_input, detach_action, var_noise), masks
-torch.sum(
1
+ (self._z_sigma ** 2).log()
- 0.5 * mu ** 2
# - 0.5 * mu_expert ** 2
- (self._z_sigma ** 2),
dim=1,
), masks
)
-torch.sum(
1 + (self._z_sigma ** 2).log() - 0.5 * mu ** 2
# - 0.5 * mu_expert ** 2
- (self._z_sigma ** 2),
dim=1,
),
masks,
)
vail_loss = self._beta * (kl_loss - self.mutual_information)
with torch.no_grad():
self._beta.data = torch.max(
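The loss combines the discriminator term (base_loss, the negative masked mean of the log-probability of the true mode) with a VAIL-style bottleneck: the KL of the latent against a unit Gaussian is held near the mutual_information budget by the Lagrange multiplier beta, whose dual update is cut off at the end of the hunk. A minimal sketch of that update; the step-size name beta_lr is an assumption, it does not appear in the hunk.

```python
import torch

def update_beta(beta: torch.Tensor, kl_loss: torch.Tensor,
                mutual_information: float, beta_lr: float = 1e-5) -> None:
    """Dual gradient step: grow beta when the KL exceeds the budget,
    shrink it (never below zero) when the KL is under it."""
    with torch.no_grad():
        beta.data = torch.max(
            beta + beta_lr * (kl_loss - mutual_information),
            torch.tensor(0.0),
        )
```

The returned mede_loss is then presumably base_loss plus vail_loss = beta * (kl_loss - mutual_information), matching how the VAIL bottleneck is folded into the GAIL reward provider elsewhere in ml-agents.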

if spec.observation_type != ObservationType.GOAL_SIGNAL
]
if self._use_actions:
action = self._action_flattener.forward(action_input).reshape(-1, self._action_flattener.flattened_size)
action = self._action_flattener.forward(action_input).reshape(
-1, self._action_flattener.flattened_size
)
if detach_action:
action = action.detach()
hidden, _ = self._encoder.forward(tensor_obs, action)

def copy_normalization(self, thing):
self._encoder.processors[0].copy_normalization(thing.processors[1])
def rewards(self, obs_input, action_input, detach_action=False, var_noise=False) -> torch.Tensor:
def rewards(
self, obs_input, action_input, detach_action=False, var_noise=False
) -> torch.Tensor:
truth = obs_input[self._diverse_index]
prediction = self.predict(obs_input, action_input, detach_action)
rewards = torch.log(torch.sum((prediction * truth), dim=1) + self.EPSILON)

# print( ">>> ",obs_input[self._diverse_index][0],self.predict(obs_input, action_input, detach_action)[0], self.predict([x*0 for x in obs_input], action_input, detach_action * 0)[0] )
return - ModelUtils.masked_mean(
self.rewards(obs_input, action_input, detach_action) , masks
return -ModelUtils.masked_mean(
self.rewards(obs_input, action_input, detach_action), masks
)

with torch.no_grad():
v_backup = (
min_policy_qs[name]
- torch.sum(_cont_ent_coef * log_probs.continuous_tensor, dim=1)
- _cont_ent_coef * torch.sum( log_probs.continuous_tensor, dim=1)
+ self._mede_network.STRENGTH
* self._mede_network.rewards(obs, act, var_noise=False)
)
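The continuous-action value backup now adds the scaled MEDE reward on top of the usual SAC soft target. A compact sketch of that computation; the names are illustrative, with `min_q` standing in for min_policy_qs[name].

```python
import torch

def soft_value_target(min_q: torch.Tensor,
                      cont_log_probs: torch.Tensor,
                      cont_ent_coef: torch.Tensor,
                      diversity_reward: torch.Tensor,
                      strength: float) -> torch.Tensor:
    """SAC soft value target augmented with the diversity bonus."""
    with torch.no_grad():
        entropy_term = cont_ent_coef * torch.sum(cont_log_probs, dim=1)
        return min_q - entropy_term + strength * diversity_reward
```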

)
for name in values.keys():
with torch.no_grad():
v_backup = min_policy_qs[name] - torch.mean(
branched_ent_bonus, axis=0
) + self._mede_network.STRENGTH * self._mede_network.rewards(obs, act)
v_backup = (
min_policy_qs[name]
- torch.mean(branched_ent_bonus, axis=0)
+ self._mede_network.STRENGTH
* self._mede_network.rewards(obs, act)
)
print("The discrete case is much more complicated than that")
# Add continuous entropy bonus to minimum Q
if self._action_spec.continuous_size > 0:

all_mean_q1 = mean_q1
if self._action_spec.continuous_size > 0:
cont_log_probs = log_probs.continuous_tensor
batch_policy_loss += torch.mean(
_cont_ent_coef * cont_log_probs - all_mean_q1.unsqueeze(1), dim=1
)
batch_policy_loss += - self._mede_network.STRENGTH * self._mede_network.rewards(obs, act, var_noise=False)
batch_policy_loss += _cont_ent_coef * torch.sum(cont_log_probs, dim=1) - all_mean_q1.unsqueeze(1)
batch_policy_loss += -self._mede_network.STRENGTH * self._mede_network.rewards(
obs, act, var_noise=False
)
policy_loss = ModelUtils.masked_mean(batch_policy_loss, loss_masks)
return policy_loss
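The policy loss gains the mirror-image term: minimizing -STRENGTH * rewards(...) maximizes the diversity bonus for the sampled actions, alongside the usual entropy-weighted log-prob minus Q objective. A minimal sketch, with an inline masked mean standing in for ModelUtils.masked_mean.

```python
import torch

def policy_loss_with_diversity(mean_q1: torch.Tensor,
                               cont_log_probs: torch.Tensor,
                               cont_ent_coef: torch.Tensor,
                               diversity_reward: torch.Tensor,
                               strength: float,
                               loss_masks: torch.Tensor) -> torch.Tensor:
    """SAC actor loss plus a term that rewards mode-identifiable behavior."""
    sac_term = cont_ent_coef * torch.sum(cont_log_probs, dim=1) - mean_q1
    batch_loss = sac_term - strength * diversity_reward
    masks = loss_masks.float()
    return (batch_loss * masks).sum() / torch.clamp(masks.sum(), min=1.0)
```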

if self._action_spec.continuous_size > 0:
with torch.no_grad():
cont_log_probs = log_probs.continuous_tensor
target_current_diff = torch.sum(
cont_log_probs, dim=1) + 10 * self.target_entropy.continuous
target_current_diff = (
torch.sum(cont_log_probs, dim=1)
+ self.target_entropy.continuous
)
# print(self.target_entropy.continuous, cont_log_probs, torch.sum(
# cont_log_probs, dim=1) + self.target_entropy.continuous)
# We update all the _cont_ent_coef as one block
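The hunk drops the stray 10x factor, so target_current_diff now compares the summed log-probabilities against the standard continuous entropy target. That difference drives the automatic tuning of the entropy coefficient; the sketch below shows the usual update it feeds, since the exact loss line sits outside the hunk.

```python
import torch

def ent_coef_loss(log_ent_coef: torch.Tensor,
                  cont_log_probs: torch.Tensor,
                  target_entropy: float) -> torch.Tensor:
    """Standard SAC alpha loss: raise the coefficient when entropy is below target."""
    with torch.no_grad():
        target_current_diff = torch.sum(cont_log_probs, dim=1) + target_entropy
    return -torch.mean(log_ent_coef * target_current_diff)
```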

total_value_loss.backward()
self.value_optimizer.step()
mede_loss, base_loss, kl_loss, vail_loss, beta = self._mede_network.loss(current_obs, sampled_actions, masks)
mede_loss, base_loss, kl_loss, vail_loss, beta = self._mede_network.loss(
current_obs, sampled_actions, masks
)
# mede_loss = self._mede_network.loss(current_obs, sampled_actions, masks)
ModelUtils.update_learning_rate(self._mede_optimizer, decay_lr)
self._mede_optimizer.zero_grad()
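The hunk ends right after zero_grad(); the rest of the discriminator update is presumably the usual backward/step pair, with the extra outputs kept for logging. A generic sketch under that assumption:

```python
def update_mede(network, optimizer, obs, actions, masks, decay_lr: float) -> dict:
    """Run one gradient step on the MEDE discriminator and collect stats."""
    mede_loss, base_loss, kl_loss, vail_loss, beta = network.loss(obs, actions, masks)
    for group in optimizer.param_groups:  # same effect as ModelUtils.update_learning_rate
        group["lr"] = decay_lr
    optimizer.zero_grad()
    mede_loss.backward()   # assumption: only the combined mede_loss is backpropagated
    optimizer.step()
    return {
        "mede_loss": mede_loss.item(),
        "base_loss": base_loss.item(),
        "kl_loss": kl_loss.item(),
        "vail_loss": vail_loss.item(),
        "beta": beta.item(),
    }
```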

ml-agents/mlagents/trainers/settings.py (3 changes)


GAIL: str = "gail"
CURIOSITY: str = "curiosity"
RND: str = "rnd"
DIVERSE:str = "diverse"
DIVERSE: str = "diverse"
def to_settings(self) -> type:
_mapping = {

class RNDSettings(RewardSignalSettings):
learning_rate: float = 1e-4
encoding_size: Optional[int] = None
@attr.s(auto_attribs=True)
class DiverseSettings(RewardSignalSettings):
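The settings hunk registers a new `diverse` reward-signal type and opens a DiverseSettings class, but its fields and the to_settings() mapping entry are outside the hunk. The sketch below shows how such a signal is typically wired up, mirroring RNDSettings above; the field defaults are assumptions.

```python
from typing import Optional

import attr

from mlagents.trainers.settings import RewardSignalSettings

@attr.s(auto_attribs=True)
class DiverseSettings(RewardSignalSettings):
    # gamma and strength come from RewardSignalSettings; these extras mirror RNDSettings
    learning_rate: float = 1e-4
    encoding_size: Optional[int] = None

# to_settings() would then presumably map the new member to this class, e.g.
# _mapping[RewardSignalType.DIVERSE] = DiverseSettings
```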

Project/Assets/ML-Agents/Examples/Walker/TFModels/mede-walker-crazy-mutual-10000-nogound-penalty.onnx (1001 changes)
File diff too large to display.

Project/Assets/ML-Agents/Examples/Walker/TFModels/mede-walker-crazy-mutual-10000-nogound-penalty.onnx.meta (14 changes)


fileFormatVersion: 2
guid: 7ce5ceea4d8ff4d4a837a510deed0b0e
ScriptedImporter:
internalIDToNameTable: []
externalObjects: {}
serializedVersion: 2
userData:
assetBundleName:
assetBundleVariant:
script: {fileID: 11500000, guid: 683b6cb6d0a474744822c888b46772c9, type: 3}
optimizeModel: 1
forceArbitraryBatchSize: 1
treatErrorsAsWarnings: 0
importMode: 1