Compare commits

...
This merge request has changes that conflict with the target branch.
/ml-agents-envs/mlagents_envs/rpc_utils.py
/ml-agents-envs/mlagents_envs/base_env.py
/ml-agents/mlagents/trainers/demo_loader.py
/ml-agents/mlagents/trainers/tests/simple_test_envs.py
/ml-agents/mlagents/trainers/tests/test_simple_rl.py
/demos/1DTestContinuous.demo
/demos/1DTestDiscrete.demo

6 commits

Author         SHA1      Message                                             Date
Andrew Cohen   f6d6e3d0  reccurent gail tests                                5 years ago
Andrew Cohen   553223e0  get_agent_id fixed                                  5 years ago
Andrew Cohen   f1eeed9c  success threshold to .9 for imitation               5 years ago
Andrew Cohen   7aaf1fb6  gail and bc tests                                   5 years ago
Andrew Cohen   e7836fb5  record demos 1d env                                 5 years ago
Andrew Cohen   b1cfa74d  Merge branch 'master' into develop-test-imitation   5 years ago
7 files changed: 1,155 additions and 137 deletions

  1. ml-agents-envs/mlagents_envs/rpc_utils.py (70 changed lines)
  2. ml-agents-envs/mlagents_envs/base_env.py (7 changed lines)
  3. ml-agents/mlagents/trainers/demo_loader.py (23 changed lines)
  4. ml-agents/mlagents/trainers/tests/simple_test_envs.py (30 changed lines)
  5. ml-agents/mlagents/trainers/tests/test_simple_rl.py (359 changed lines)
  6. demos/1DTestContinuous.demo (402 changed lines)
  7. demos/1DTestDiscrete.demo (401 changed lines)

ml-agents-envs/mlagents_envs/rpc_utils.py (70 changed lines)


from mlagents_envs.exception import UnityObservationException
from mlagents_envs.timers import hierarchical_timer, timed
from mlagents_envs.communicator_objects.agent_info_pb2 import AgentInfoProto
from mlagents_envs.communicator_objects.agent_action_pb2 import AgentActionProto
from mlagents_envs.communicator_objects.agent_info_action_pair_pb2 import (
    AgentInfoActionPairProto,
)
    NONE as COMPRESSION_NONE,
    NONE as COMPRESSION_TYPE_NONE,
)
from mlagents_envs.communicator_objects.brain_parameters_pb2 import BrainParametersProto
import numpy as np

f"Observation did not have the expected shape - got {obs.shape} but expected {expected_shape}"
)
gray_scale = obs.shape[2] == 1
if obs.compression_type == COMPRESSION_NONE:
if obs.compression_type == COMPRESSION_TYPE_NONE:
img = np.array(obs.float_data.data, dtype=np.float32)
img = np.reshape(img, obs.shape)
return img

        indices = _generate_split_indices(group_spec.discrete_action_branches)
        action_mask = np.split(action_mask, indices, axis=1)
    return BatchedStepResult(obs_list, rewards, done, max_step, agent_id, action_mask)


def proto_from_batched_step_result(
    batched_step_result: BatchedStepResult
) -> List[AgentInfoProto]:
    agent_info_protos: List[AgentInfoProto] = []
    for agent_id in batched_step_result.agent_id:
        agent_id_index = batched_step_result.get_index(agent_id)
        reward = batched_step_result.reward[agent_id_index]
        done = batched_step_result.done[agent_id_index]
        max_step_reached = batched_step_result.max_step[agent_id_index]
        agent_mask = None
        if batched_step_result.action_mask is not None:
            mask = batched_step_result.action_mask[0]
            agent_mask = mask[agent_id_index]
        observations: List[ObservationProto] = []
        for all_observations_of_type in batched_step_result.obs:
            observation = all_observations_of_type[agent_id_index]
            if len(observation.shape) == 3:
                observations.append(
                    ObservationProto(
                        compressed_data=observation,
                        shape=observation.shape,
                        compression_type=COMPRESSION_TYPE_NONE,
                    )
                )
            else:
                observations.append(
                    ObservationProto(
                        float_data=ObservationProto.FloatData(data=observation),
                        shape=[len(observation)],
                        compression_type=COMPRESSION_TYPE_NONE,
                    )
                )
        agent_info_proto = AgentInfoProto(
            reward=reward,
            done=done,
            id=agent_id,
            max_step_reached=max_step_reached,
            action_mask=agent_mask,
            observations=observations,
        )
        agent_info_protos.append(agent_info_proto)
    return agent_info_protos


# The arguments here are the BatchedStepResult and actions for a single agent name
def proto_from_batched_step_result_and_action(
    batched_step_result: BatchedStepResult, actions: np.ndarray
) -> List[AgentInfoActionPairProto]:
    agent_info_protos = proto_from_batched_step_result(batched_step_result)
    agent_action_protos = [
        AgentActionProto(vector_actions=action) for action in actions
    ]
    agent_info_action_pair_protos = [
        AgentInfoActionPairProto(agent_info=agent_info_proto, action_info=action_proto)
        for agent_info_proto, action_proto in zip(
            agent_info_protos, agent_action_protos
        )
    ]
    return agent_info_action_pair_protos


def _generate_split_indices(dims):
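
The two helpers above convert a BatchedStepResult and the actions taken that step into the AgentInfoActionPairProto messages the demonstration writer consumes. A minimal usage sketch follows; the constructor argument order is taken from the return BatchedStepResult(...) line above, while the concrete shapes and dtypes are assumptions for illustration:

import numpy as np
from mlagents_envs.base_env import BatchedStepResult
from mlagents_envs.rpc_utils import proto_from_batched_step_result_and_action

# One agent with a single 1-dimensional vector observation and no action mask.
obs_list = [np.array([[0.5]], dtype=np.float32)]
result = BatchedStepResult(
    obs_list,                            # obs
    np.array([1.0], dtype=np.float32),   # reward
    np.array([False]),                   # done
    np.array([False]),                   # max_step
    np.array([0], dtype=np.int32),       # agent_id
    None,                                # action_mask
)
actions = np.array([[0.7]], dtype=np.float32)
pairs = proto_from_batched_step_result_and_action(result, actions)
assert len(pairs) == 1  # one AgentInfoActionPairProto per agent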

ml-agents-envs/mlagents_envs/base_env.py (7 changed lines)


    def contains_agent(self, agent_id: AgentId) -> bool:
        return agent_id in self.agent_id_to_index

    def get_index(self, agent_id: AgentId) -> int:
        return self.agent_id_to_index.get(agent_id, -1)  # type: ignore

    def get_agent_step_result(self, agent_id: AgentId) -> StepResult:
        """
        returns the step result for a specific agent.

        """
        if not self.contains_agent(agent_id):
            raise IndexError(
                "agent_id {} is not present in the BatchedStepResult".format(agent_id)
                "get_agent_step_result failed. agent_id {} is not present in the BatchedStepResult".format(
                    agent_id
                )
            )
        agent_index = self._agent_id_to_index[agent_id]  # type: ignore
        agent_obs = []

ml-agents/mlagents/trainers/demo_loader.py (23 changed lines)


)
from mlagents_envs.timers import timed, hierarchical_timer
from google.protobuf.internal.decoder import _DecodeVarint32 # type: ignore
from google.protobuf.internal.encoder import _EncodeVarint # type: ignore
@timed

)
INITIAL_POS = 33
@timed
def load_demonstration(
    file_path: str

    """
    # First 32 bytes of file dedicated to meta-data.
    INITIAL_POS = 33
    file_paths = get_demo_files(file_path)
    group_spec = None
    brain_param_proto = None

f"No BrainParameters found in demonstration file at {file_path}."
)
return group_spec, info_action_pairs, total_expected
def write_delimited(f, message):
msg_string = message.SerializeToString()
msg_size = len(msg_string)
_EncodeVarint(f.write, msg_size)
f.write(msg_string)
def write_demo(demo_path, meta_data_proto, brain_param_proto, agent_info_protos):
with open(demo_path, "wb") as f:
# write metadata
write_delimited(f, meta_data_proto)
f.seek(INITIAL_POS)
write_delimited(f, brain_param_proto)
for agent in agent_info_protos:
write_delimited(f, agent)
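
write_demo lays the file out the way load_demonstration reads it back: a varint-delimited DemonstrationMetaProto in the first 32 bytes (hence the seek to INITIAL_POS = 33), followed by a varint-delimited BrainParametersProto and one delimited message per recorded step. Below is a small round-trip sketch of that varint-delimited encoding, using the same protobuf helpers imported above; the DemonstrationMetaProto field names used here are assumptions:

import io

from google.protobuf.internal.decoder import _DecodeVarint32  # type: ignore
from google.protobuf.internal.encoder import _EncodeVarint  # type: ignore
from mlagents_envs.communicator_objects.demonstration_meta_pb2 import (
    DemonstrationMetaProto,
)

# Field names here are assumptions about DemonstrationMetaProto.
meta = DemonstrationMetaProto(number_steps=100, demonstration_name="example")

# Write: a varint length prefix followed by the serialized message.
buf = io.BytesIO()
msg = meta.SerializeToString()
_EncodeVarint(buf.write, len(msg))
buf.write(msg)

# Read it back: decode the length prefix, then parse exactly that many bytes.
data = buf.getvalue()
size, pos = _DecodeVarint32(data, 0)
decoded = DemonstrationMetaProto()
decoded.ParseFromString(data[pos : pos + size])
assert decoded.number_steps == 100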

ml-agents/mlagents/trainers/tests/simple_test_envs.py (30 changed lines)


    BatchedStepResult,
    ActionType,
)
from mlagents_envs.rpc_utils import proto_from_batched_step_result_and_action
from mlagents_envs.communicator_objects.agent_info_action_pair_pb2 import (
    AgentInfoActionPairProto,
)
OBS_SIZE = 1
VIS_OBS_SIZE = (20, 20, 3)

            m_agent_id,
            action_mask,
        )


class Record1DEnvironment(Simple1DEnvironment):
    def __init__(
        self, brain_names, use_discrete, step_size=0.2, num_vector=1, n_demos=30
    ):
        super().__init__(
            brain_names, use_discrete, step_size=0.2, num_vector=num_vector
        )
        self.demonstration_protos: Dict[str, List[AgentInfoActionPairProto]] = {}
        self.n_demos = n_demos
        for name in self.names:
            self.demonstration_protos[name] = []

    def step(self) -> None:
        super().step()
        # proto_from_batched_step_result(self.step_result[name])
        for name in self.names:
            self.demonstration_protos[
                name
            ] += proto_from_batched_step_result_and_action(
                self.step_result[name], self.action[name]
            )
            self.demonstration_protos[name] = self.demonstration_protos[name][
                -self.n_demos :
            ]

ml-agents/mlagents/trainers/tests/test_simple_rl.py (359 changed lines)


from mlagents.trainers.tests.simple_test_envs import (
    Simple1DEnvironment,
    Memory1DEnvironment,
    Record1DEnvironment,
)
from mlagents.trainers.demo_loader import write_demo
from mlagents_envs.communicator_objects.demonstration_meta_pb2 import (
    DemonstrationMetaProto,
)
from mlagents_envs.communicator_objects.brain_parameters_pb2 import BrainParametersProto
from mlagents_envs.communicator_objects.space_type_pb2 import discrete, continuous

BRAIN_NAME = "1D"

        assert all(reward > success_threshold for reward in processed_rewards)
# @pytest.mark.parametrize("use_discrete", [True, False])
# def test_simple_ppo(use_discrete):
# env = Simple1DEnvironment([BRAIN_NAME], use_discrete=use_discrete)
# config = generate_config(PPO_CONFIG)
# _check_environment_trains(env, config)
#
#
# @pytest.mark.parametrize("use_discrete", [True, False])
# @pytest.mark.parametrize("num_visual", [1, 2])
# def test_visual_ppo(num_visual, use_discrete):
# env = Simple1DEnvironment(
# [BRAIN_NAME],
# use_discrete=use_discrete,
# num_visual=num_visual,
# num_vector=0,
# step_size=0.2,
# )
# override_vals = {"learning_rate": 3.0e-4}
# config = generate_config(PPO_CONFIG, override_vals)
# _check_environment_trains(env, config)
#
#
# @pytest.mark.parametrize("num_visual", [1, 2])
# @pytest.mark.parametrize("vis_encode_type", ["resnet", "nature_cnn"])
# def test_visual_advanced_ppo(vis_encode_type, num_visual):
# env = Simple1DEnvironment(
# [BRAIN_NAME],
# use_discrete=True,
# num_visual=num_visual,
# num_vector=0,
# step_size=0.5,
# vis_obs_size=(36, 36, 3),
# )
# override_vals = {
# "learning_rate": 3.0e-4,
# "vis_encode_type": vis_encode_type,
# "max_steps": 500,
# "summary_freq": 100,
# }
# config = generate_config(PPO_CONFIG, override_vals)
# # The number of steps is pretty small for these encoders
# _check_environment_trains(env, config, success_threshold=0.5)
#
#
# @pytest.mark.parametrize("use_discrete", [True, False])
# def test_recurrent_ppo(use_discrete):
# env = Memory1DEnvironment([BRAIN_NAME], use_discrete=use_discrete)
# override_vals = {
# "max_steps": 3000,
# "batch_size": 64,
# "buffer_size": 128,
# "use_recurrent": True,
# }
# config = generate_config(PPO_CONFIG, override_vals)
# _check_environment_trains(env, config)
#
#
# @pytest.mark.parametrize("use_discrete", [True, False])
# def test_simple_sac(use_discrete):
# env = Simple1DEnvironment([BRAIN_NAME], use_discrete=use_discrete)
# config = generate_config(SAC_CONFIG)
# _check_environment_trains(env, config)
#
#
# @pytest.mark.parametrize("use_discrete", [True, False])
# @pytest.mark.parametrize("num_visual", [1, 2])
# def test_visual_sac(num_visual, use_discrete):
# env = Simple1DEnvironment(
# [BRAIN_NAME],
# use_discrete=use_discrete,
# num_visual=num_visual,
# num_vector=0,
# step_size=0.2,
# )
# override_vals = {"batch_size": 16, "learning_rate": 3e-4}
# config = generate_config(SAC_CONFIG, override_vals)
# _check_environment_trains(env, config)
#
#
# @pytest.mark.parametrize("num_visual", [1, 2])
# @pytest.mark.parametrize("vis_encode_type", ["resnet", "nature_cnn"])
# def test_visual_advanced_sac(vis_encode_type, num_visual):
# env = Simple1DEnvironment(
# [BRAIN_NAME],
# use_discrete=True,
# num_visual=num_visual,
# num_vector=0,
# step_size=0.5,
# vis_obs_size=(36, 36, 3),
# )
# override_vals = {
# "batch_size": 16,
# "learning_rate": 3.0e-4,
# "vis_encode_type": vis_encode_type,
# "buffer_init_steps": 0,
# "max_steps": 100,
# }
# config = generate_config(SAC_CONFIG, override_vals)
# # The number of steps is pretty small for these encoders
# _check_environment_trains(env, config, success_threshold=0.5)
#
#
# @pytest.mark.parametrize("use_discrete", [True, False])
# def test_recurrent_sac(use_discrete):
# env = Memory1DEnvironment([BRAIN_NAME], use_discrete=use_discrete)
# override_vals = {"batch_size": 32, "use_recurrent": True, "max_steps": 2000}
# config = generate_config(SAC_CONFIG, override_vals)
# _check_environment_trains(env, config)
#
#
# @pytest.mark.parametrize("use_discrete", [True, False])
# def test_simple_ghost(use_discrete):
# env = Simple1DEnvironment(
# [BRAIN_NAME + "?team=0", BRAIN_NAME + "?team=1"], use_discrete=use_discrete
# )
# override_vals = {
# "max_steps": 2500,
# "self_play": {
# "play_against_current_self_ratio": 1.0,
# "save_steps": 2000,
# "swap_steps": 2000,
# },
# }
# config = generate_config(PPO_CONFIG, override_vals)
# _check_environment_trains(env, config)
#
#
# @pytest.mark.parametrize("use_discrete", [True, False])
# def test_simple_ghost_fails(use_discrete):
# env = Simple1DEnvironment(
# [BRAIN_NAME + "?team=0", BRAIN_NAME + "?team=1"], use_discrete=use_discrete
# )
# # This config should fail because the ghosted policy is never swapped with a competent policy.
# # Swap occurs after max step is reached.
# override_vals = {
# "max_steps": 2500,
# "self_play": {
# "play_against_current_self_ratio": 1.0,
# "save_steps": 2000,
# "swap_steps": 4000,
# },
# }
# config = generate_config(PPO_CONFIG, override_vals)
# _check_environment_trains(env, config, success_threshold=None)
# processed_rewards = [
# default_reward_processor(rewards) for rewards in env.final_rewards.values()
# ]
# success_threshold = 0.99
# assert any(reward > success_threshold for reward in processed_rewards) and any(
# reward < success_threshold for reward in processed_rewards
# )
#
#
def test_simple_ppo(use_discrete):
    env = Simple1DEnvironment([BRAIN_NAME], use_discrete=use_discrete)
    config = generate_config(PPO_CONFIG)
def test_simple_record(use_discrete):
    env = Record1DEnvironment([BRAIN_NAME], use_discrete=use_discrete, n_demos=100)
    override_vals = {"max_steps": 5000}
    config = generate_config(PPO_CONFIG, override_vals)
@pytest.mark.parametrize("use_discrete", [True, False])
@pytest.mark.parametrize("num_visual", [1, 2])
def test_visual_ppo(num_visual, use_discrete):
    env = Simple1DEnvironment(
        [BRAIN_NAME],
        use_discrete=use_discrete,
        num_visual=num_visual,
        num_vector=0,
        step_size=0.2,
    agent_info_protos = env.demonstration_protos[BRAIN_NAME]
    meta_data_proto = DemonstrationMetaProto()
    brain_param_proto = BrainParametersProto(
        vector_action_size=[1],
        vector_action_descriptions=[""],
        vector_action_space_type=discrete if use_discrete else continuous,
        brain_name=BRAIN_NAME,
        is_training=True,
    override_vals = {"learning_rate": 3.0e-4}
    config = generate_config(PPO_CONFIG, override_vals)
    _check_environment_trains(env, config)
    action_type = "Discrete" if use_discrete else "Continuous"
    demo_path = "demos/1DTest" + action_type + ".demo"
    write_demo(demo_path, meta_data_proto, brain_param_proto, agent_info_protos)
@pytest.mark.parametrize("num_visual", [1, 2])
@pytest.mark.parametrize("vis_encode_type", ["resnet", "nature_cnn"])
def test_visual_advanced_ppo(vis_encode_type, num_visual):
    env = Simple1DEnvironment(
        [BRAIN_NAME],
        use_discrete=True,
        num_visual=num_visual,
        num_vector=0,
        step_size=0.5,
        vis_obs_size=(36, 36, 3),
    )
    override_vals = {
        "learning_rate": 3.0e-4,
        "vis_encode_type": vis_encode_type,
        "max_steps": 500,
        "summary_freq": 100,
    }
    config = generate_config(PPO_CONFIG, override_vals)
    # The number of steps is pretty small for these encoders
    _check_environment_trains(env, config, success_threshold=0.5)
# @pytest.mark.parametrize("use_discrete", [True, False])
# @pytest.mark.parametrize("trainer_config", [PPO_CONFIG, SAC_CONFIG])
# def test_gail(use_discrete, trainer_config):
# env = Simple1DEnvironment([BRAIN_NAME], use_discrete=use_discrete)
# action_type = "Discrete" if use_discrete else "Continuous"
# demo_path = "demos/1DTest" + action_type + ".demo"
# override_vals = {
# "max_steps": 500,
# "behavioral_cloning": {"demo_path": demo_path, "strength": 1.0, "steps": 2000},
# "reward_signals": {
# "gail": {
# "strength": 1.0,
# "gamma": 0.99,
# "encoding_size": 32,
# "demo_path": demo_path,
# }
# },
# }
# config = generate_config(trainer_config, override_vals)
# _check_environment_trains(env, config, success_threshold=0.9)
def test_recurrent_ppo(use_discrete):
def test_recurrent_gail_ppo(use_discrete):
    action_type = "Discrete" if use_discrete else "Continuous"
    demo_path = "demos/1DTest" + action_type + ".demo"
        "max_steps": 3000,
        "max_steps": 1000,
    }
    config = generate_config(PPO_CONFIG, override_vals)
    _check_environment_trains(env, config)
@pytest.mark.parametrize("use_discrete", [True, False])
def test_simple_sac(use_discrete):
env = Simple1DEnvironment([BRAIN_NAME], use_discrete=use_discrete)
config = generate_config(SAC_CONFIG)
_check_environment_trains(env, config)
@pytest.mark.parametrize("use_discrete", [True, False])
@pytest.mark.parametrize("num_visual", [1, 2])
def test_visual_sac(num_visual, use_discrete):
env = Simple1DEnvironment(
[BRAIN_NAME],
use_discrete=use_discrete,
num_visual=num_visual,
num_vector=0,
step_size=0.2,
)
override_vals = {"batch_size": 16, "learning_rate": 3e-4}
config = generate_config(SAC_CONFIG, override_vals)
_check_environment_trains(env, config)
@pytest.mark.parametrize("num_visual", [1, 2])
@pytest.mark.parametrize("vis_encode_type", ["resnet", "nature_cnn"])
def test_visual_advanced_sac(vis_encode_type, num_visual):
env = Simple1DEnvironment(
[BRAIN_NAME],
use_discrete=True,
num_visual=num_visual,
num_vector=0,
step_size=0.5,
vis_obs_size=(36, 36, 3),
)
override_vals = {
"batch_size": 16,
"learning_rate": 3.0e-4,
"vis_encode_type": vis_encode_type,
"buffer_init_steps": 0,
"max_steps": 100,
}
config = generate_config(SAC_CONFIG, override_vals)
# The number of steps is pretty small for these encoders
_check_environment_trains(env, config, success_threshold=0.5)
@pytest.mark.parametrize("use_discrete", [True, False])
def test_recurrent_sac(use_discrete):
env = Memory1DEnvironment([BRAIN_NAME], use_discrete=use_discrete)
override_vals = {"batch_size": 32, "use_recurrent": True, "max_steps": 2000}
config = generate_config(SAC_CONFIG, override_vals)
_check_environment_trains(env, config)
@pytest.mark.parametrize("use_discrete", [True, False])
def test_simple_ghost(use_discrete):
env = Simple1DEnvironment(
[BRAIN_NAME + "?team=0", BRAIN_NAME + "?team=1"], use_discrete=use_discrete
)
override_vals = {
"max_steps": 2500,
"self_play": {
"play_against_current_self_ratio": 1.0,
"save_steps": 2000,
"swap_steps": 2000,
"behavioral_cloning": {"demo_path": demo_path, "strength": 1.0, "steps": 2000},
"reward_signals": {
"gail": {
"strength": 1.0,
"gamma": 0.99,
"encoding_size": 32,
"demo_path": demo_path,
}
_check_environment_trains(env, config)
_check_environment_trains(env, config, success_threshold=0.9)
@pytest.mark.parametrize("use_discrete", [True, False])
def test_simple_ghost_fails(use_discrete):
env = Simple1DEnvironment(
[BRAIN_NAME + "?team=0", BRAIN_NAME + "?team=1"], use_discrete=use_discrete
)
# This config should fail because the ghosted policy is never swapped with a competent policy.
# Swap occurs after max step is reached.
override_vals = {
"max_steps": 2500,
"self_play": {
"play_against_current_self_ratio": 1.0,
"save_steps": 2000,
"swap_steps": 4000,
},
}
config = generate_config(PPO_CONFIG, override_vals)
_check_environment_trains(env, config, success_threshold=None)
processed_rewards = [
default_reward_processor(rewards) for rewards in env.final_rewards.values()
]
success_threshold = 0.99
assert any(reward > success_threshold for reward in processed_rewards) and any(
reward < success_threshold for reward in processed_rewards
)
# @pytest.mark.parametrize("use_discrete", [True, False])
# def test_recurrent_sac_gail(use_discrete):
# env = Memory1DEnvironment([BRAIN_NAME], use_discrete=use_discrete)
# action_type = "Discrete" if use_discrete else "Continuous"
# demo_path = "demos/1DTest" + action_type + ".demo"
# override_vals = {"batch_size": 32, "use_recurrent": True, "max_steps": 1000,
# "behavioral_cloning": {"demo_path": demo_path, "strength": 1.0, "steps": 2000},
# "reward_signals": {
# "gail": {
# "strength": 1.0,
# "gamma": 0.99,
# "encoding_size": 128,
# "demo_path": demo_path,
# }
# },
# }
# config = generate_config(SAC_CONFIG, override_vals)
# _check_environment_trains(env, config, success_threshold=0.9)

demos/1DTestContinuous.demo (402 changed lines)


(Binary demonstration file; contents not rendered.)

demos/1DTestDiscrete.demo (401 changed lines)


(Binary demonstration file; contents not rendered.)
