
Update tests to support pytest 5.x

Our tests were using pytest fixtures by calling the fixture functions
directly, but pytest 5.x rejects direct calls to fixtures, so these tests
started failing. The recommended way to consume a fixture is dependency
injection: declare it as a test argument and let pytest supply it.

This change updates the relevant test fixtures to either not use
`pytest.fixture` or to use dependency injection to pass the fixture.
The version range requirements in `test_requirements.txt` were also
updated accordingly.
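For context, here is the pattern in miniature (a hypothetical sketch; the fixture name and config contents are illustrative, not taken from this diff): a decorated fixture can no longer be called as a plain function, but requesting it as a test parameter lets pytest inject it.

```python
import pytest


@pytest.fixture
def dummy_config():
    # Illustrative fixture; the real tests build their configs from YAML.
    return {"batch_size": 32}


# Old pattern: calling the fixture function directly.
# Under pytest 5.x this fails with "Fixtures are not meant to be called directly".
#
# def test_batch_size():
#     config = dummy_config()
#     assert config["batch_size"] == 32


# New pattern: declare the fixture as an argument and pytest injects its value.
def test_batch_size(dummy_config):
    assert dummy_config["batch_size"] == 32
```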
/asymm-envs
Jonathan Harper, 5 years ago
Commit 9f166f9e
6 files changed, 40 insertions and 65 deletions
  1. ml-agents/mlagents/trainers/tests/test_learn.py (1 change)
  2. ml-agents/mlagents/trainers/tests/test_rl_trainer.py (1 change)
  3. ml-agents/mlagents/trainers/tests/test_sac.py (4 changes)
  4. ml-agents/mlagents/trainers/tests/test_trainer_controller.py (67 changes)
  5. ml-agents/mlagents/trainers/tests/test_trainer_util.py (30 changes)
  6. test_requirements.txt (2 changes)

ml-agents/mlagents/trainers/tests/test_learn.py (1 change)


 from mlagents.trainers.learn import parse_command_line
-@pytest.fixture
 def basic_options(extra_args=None):
     extra_args = extra_args or {}
     args = ["basic_path"]

ml-agents/mlagents/trainers/tests/test_rl_trainer.py (1 change)


 from mlagents.trainers.buffer import AgentBuffer
-@pytest.fixture
 def dummy_config():
     return yaml.safe_load(
         """

ml-agents/mlagents/trainers/tests/test_sac.py (4 changes)


     sess.run(run_list, feed_dict=feed_dict)
-def test_sac_save_load_buffer(tmpdir):
+def test_sac_save_load_buffer(tmpdir, dummy_config):
     env, mock_brain, _ = mb.setup_mock_env_and_brains(
         mock.Mock(),
         False,

         vector_obs_space=VECTOR_OBS_SPACE,
         discrete_action_space=DISCRETE_ACTION_SPACE,
     )
-    trainer_params = dummy_config()
+    trainer_params = dummy_config
     trainer_params["summary_path"] = str(tmpdir)
     trainer_params["model_path"] = str(tmpdir)
     trainer_params["save_replay_buffer"] = True

ml-agents/mlagents/trainers/tests/test_trainer_controller.py (67 changes)


 from unittest.mock import MagicMock, Mock, patch
+import pytest
 import yaml
-import pytest
 from mlagents.trainers.trainer_controller import TrainerController
 from mlagents.trainers.subprocess_env_manager import EnvironmentStep
 from mlagents.trainers.sampler_class import SamplerManager

-def dummy_config():
-    return yaml.safe_load(
-        """
-        default:
-            trainer: ppo
-            batch_size: 32
-            beta: 5.0e-3
-            buffer_size: 512
-            epsilon: 0.2
-            gamma: 0.99
-            hidden_units: 128
-            lambd: 0.95
-            learning_rate: 3.0e-4
-            max_steps: 5.0e4
-            normalize: true
-            num_epoch: 5
-            num_layers: 2
-            time_horizon: 64
-            sequence_length: 64
-            summary_freq: 1000
-            use_recurrent: false
-            memory_size: 8
-            use_curiosity: false
-            curiosity_strength: 0.0
-            curiosity_enc_size: 1
-        """
-    )
 @pytest.fixture
 def basic_trainer_controller():
     return TrainerController(
         trainer_factory=None,

     tensorflow_set_seed.assert_called_with(seed)
-def trainer_controller_with_start_learning_mocks():
+@pytest.fixture
+def trainer_controller_with_start_learning_mocks(basic_trainer_controller):
     trainer_mock = MagicMock()
     trainer_mock.get_step = 0
     trainer_mock.get_max_steps = 5

-    tc = basic_trainer_controller()
+    tc = basic_trainer_controller
     tc.initialize_trainers = MagicMock()
     tc.trainers = {"testbrain": trainer_mock}
     tc.advance = MagicMock()

 @patch.object(tf, "reset_default_graph")
-def test_start_learning_trains_forever_if_no_train_model(tf_reset_graph):
-    tc, trainer_mock = trainer_controller_with_start_learning_mocks()
+def test_start_learning_trains_forever_if_no_train_model(
+    tf_reset_graph, trainer_controller_with_start_learning_mocks
+):
+    tc, trainer_mock = trainer_controller_with_start_learning_mocks
     tc.train_model = False
     tf_reset_graph.return_value = None

 @patch.object(tf, "reset_default_graph")
-def test_start_learning_trains_until_max_steps_then_saves(tf_reset_graph):
-    tc, trainer_mock = trainer_controller_with_start_learning_mocks()
+def test_start_learning_trains_until_max_steps_then_saves(
+    tf_reset_graph, trainer_controller_with_start_learning_mocks
+):
+    tc, trainer_mock = trainer_controller_with_start_learning_mocks
     tf_reset_graph.return_value = None
     brain_info_mock = MagicMock()

     tc._save_model.assert_called_once()
-def trainer_controller_with_take_step_mocks():
+@pytest.fixture
+def trainer_controller_with_take_step_mocks(basic_trainer_controller):
     trainer_mock = MagicMock()
     trainer_mock.get_step = 0
     trainer_mock.get_max_steps = 5

-    tc = basic_trainer_controller()
+    tc = basic_trainer_controller
-def test_take_step_adds_experiences_to_trainer_and_trains():
-    tc, trainer_mock = trainer_controller_with_take_step_mocks()
+def test_take_step_adds_experiences_to_trainer_and_trains(
+    trainer_controller_with_take_step_mocks
+):
+    tc, trainer_mock = trainer_controller_with_take_step_mocks
     brain_name = "testbrain"
     action_info_dict = {brain_name: MagicMock()}

     trainer_mock.increment_step.assert_called_once()
-def test_take_step_if_not_training():
-    tc, trainer_mock = trainer_controller_with_take_step_mocks()
+def test_take_step_if_not_training(trainer_controller_with_take_step_mocks):
+    tc, trainer_mock = trainer_controller_with_take_step_mocks
     tc.train_model = False
     brain_name = "testbrain"

ml-agents/mlagents/trainers/tests/test_trainer_util.py (30 changes)


 @pytest.fixture
-def dummy_config_with_override():
-    base = dummy_config()
+def dummy_config_with_override(dummy_config):
+    base = dummy_config
     base["testbrain"] = {}
     base["testbrain"]["normalize"] = False
     return base

 @patch("mlagents.trainers.brain.BrainParameters")
-def test_initialize_trainer_parameters_override_defaults(BrainParametersMock):
+def test_initialize_trainer_parameters_override_defaults(
+    BrainParametersMock, dummy_config_with_override
+):
     summaries_dir = "test_dir"
     run_id = "testrun"
     model_path = "model_dir"

     seed = 11
     expected_reward_buff_cap = 1
-    base_config = dummy_config_with_override()
+    base_config = dummy_config_with_override
     expected_config = base_config["default"]
     expected_config["summary_path"] = summaries_dir + f"/{run_id}_testbrain"
     expected_config["model_path"] = model_path + "/testbrain"

 @patch("mlagents.trainers.brain.BrainParameters")
-def test_initialize_ppo_trainer(BrainParametersMock):
+def test_initialize_ppo_trainer(BrainParametersMock, dummy_config):
     brain_params_mock = BrainParametersMock()
     BrainParametersMock.return_value.brain_name = "testbrain"
     external_brains = {"testbrain": BrainParametersMock()}

     seed = 11
     expected_reward_buff_cap = 1
-    base_config = dummy_config()
+    base_config = dummy_config
     expected_config = base_config["default"]
     expected_config["summary_path"] = summaries_dir + f"/{run_id}_testbrain"
     expected_config["model_path"] = model_path + "/testbrain"

 @patch("mlagents.trainers.brain.BrainParameters")
-def test_initialize_invalid_trainer_raises_exception(BrainParametersMock):
+def test_initialize_invalid_trainer_raises_exception(
+    BrainParametersMock, dummy_bad_config
+):
     summaries_dir = "test_dir"
     run_id = "testrun"
     model_path = "model_dir"

     seed = 11
-    bad_config = dummy_bad_config()
+    bad_config = dummy_bad_config
     BrainParametersMock.return_value.brain_name = "testbrain"
     external_brains = {"testbrain": BrainParametersMock()}

         trainers[brain_name] = trainer_factory.generate(brain_parameters)
-def test_handles_no_default_section():
+def test_handles_no_default_section(dummy_config):
-    config = dummy_config()
-    no_default_config = {brain_name: config["default"]}
+    no_default_config = {brain_name: dummy_config["default"]}
     brain_parameters = BrainParameters(
         brain_name=brain_name,
         vector_observation_space_size=1,

         trainer_factory.generate(brain_parameters)
-def test_raise_if_no_config_for_brain():
+def test_raise_if_no_config_for_brain(dummy_config):
-    config = dummy_config()
-    bad_config = {"some_other_brain": config["default"]}
+    bad_config = {"some_other_brain": dummy_config["default"]}
     brain_parameters = BrainParameters(
         brain_name=brain_name,
         vector_observation_space_size=1,

test_requirements.txt (2 changes)


 # Test-only dependencies should go here, not in setup.py
-pytest>=3.2.2,<4.0.0
+pytest>=3.2.2,<6.0.0
 pytest-cov==2.6.1