Unit tests: make set_module_args() a context manager, and remove copies of it in some tests (#9838)

Make set_module_args() a context manager, and remove copies of set_module_args().

Prepares for Data Tagging.
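
For orientation: the old set_module_args() helper simply wrote the serialized arguments into a module-level variable and left them there, while a context-manager form can also restore the previous state when the block exits. A minimal sketch of such a helper is shown below. It is not necessarily the exact code this commit ships; it assumes the helper keeps injecting arguments by patching ansible.module_utils.basic._ANSIBLE_ARGS, as the old function-style helper did, and the fallback keys are copied from that old helper.

import contextlib
import json

from unittest.mock import patch

from ansible.module_utils import basic
from ansible.module_utils.common.text.converters import to_bytes


@contextlib.contextmanager
def set_module_args(args):
    """Temporarily inject module arguments for an AnsibleModule under test."""
    args = dict(args)  # do not mutate the caller's dict
    args.setdefault("_ansible_remote_tmp", "/tmp")
    args.setdefault("_ansible_keep_remote_files", False)
    serialized = to_bytes(json.dumps({"ANSIBLE_MODULE_ARGS": args}))
    # patch.object() restores the previous value when the block exits, even if
    # the test body raises, so arguments no longer leak between tests.
    with patch.object(basic, "_ANSIBLE_ARGS", serialized):
        yield

Tests then wrap their setup and the code that reads the arguments in a with set_module_args({...}): block, as the diff below shows.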
Authored by Felix Fontein on 2025-03-07 07:21:03 +01:00; committed by GitHub
parent 402f725424
commit a1781d09dd
84 changed files with 4043 additions and 4302 deletions

Changes to the pacman module unit tests:

@@ -152,25 +152,25 @@ class TestPacman:
     def test_fail_without_required_args(self):
         with pytest.raises(AnsibleFailJson) as e:
-            set_module_args({})
-            pacman.main()
+            with set_module_args({}):
+                pacman.main()
         assert e.match(r"one of the following is required")
 
     def test_success(self, mock_empty_inventory):
-        set_module_args({"update_cache": True})  # Simplest args to let init go through
-        P = pacman.Pacman(pacman.setup_module())
-        with pytest.raises(AnsibleExitJson) as e:
-            P.success()
+        with set_module_args({"update_cache": True}):  # Simplest args to let init go through
+            P = pacman.Pacman(pacman.setup_module())
+            with pytest.raises(AnsibleExitJson) as e:
+                P.success()
 
     def test_fail(self, mock_empty_inventory):
-        set_module_args({"update_cache": True})
-        P = pacman.Pacman(pacman.setup_module())
+        with set_module_args({"update_cache": True}):
+            P = pacman.Pacman(pacman.setup_module())
 
-        args = dict(
-            msg="msg", stdout="something", stderr="somethingelse", cmd=["command", "with", "args"], rc=1
-        )
-        with pytest.raises(AnsibleFailJson) as e:
-            P.fail(**args)
+            args = dict(
+                msg="msg", stdout="something", stderr="somethingelse", cmd=["command", "with", "args"], rc=1
+            )
+            with pytest.raises(AnsibleFailJson) as e:
+                P.fail(**args)
 
         assert all(item in e.value.args[0] for item in args)
@@ -333,33 +333,33 @@ class TestPacman:
     def test_build_inventory(self, expected, run_command_side_effect, raises):
         self.mock_run_command.side_effect = run_command_side_effect
-        set_module_args({"update_cache": True})
-        if raises:
-            with pytest.raises(raises):
-                P = pacman.Pacman(pacman.setup_module())
-                P._build_inventory()
-        else:
-            P = pacman.Pacman(pacman.setup_module())
-            assert P._build_inventory() == expected
+        with set_module_args({"update_cache": True}):
+            if raises:
+                with pytest.raises(raises):
+                    P = pacman.Pacman(pacman.setup_module())
+                    P._build_inventory()
+            else:
+                P = pacman.Pacman(pacman.setup_module())
+                assert P._build_inventory() == expected
 
     @pytest.mark.parametrize("check_mode_value", [True, False])
     def test_upgrade_check_empty_inventory(self, mock_empty_inventory, check_mode_value):
-        set_module_args({"upgrade": True, "_ansible_check_mode": check_mode_value})
-        P = pacman.Pacman(pacman.setup_module())
-        with pytest.raises(AnsibleExitJson) as e:
-            P.run()
-        self.mock_run_command.call_count == 0
+        with set_module_args({"upgrade": True, "_ansible_check_mode": check_mode_value}):
+            P = pacman.Pacman(pacman.setup_module())
+            with pytest.raises(AnsibleExitJson) as e:
+                P.run()
+            assert self.mock_run_command.call_count == 0
         out = e.value.args[0]
         assert "packages" not in out
         assert not out["changed"]
         assert "diff" not in out
 
     def test_update_db_check(self, mock_empty_inventory):
-        set_module_args({"update_cache": True, "_ansible_check_mode": True})
-        P = pacman.Pacman(pacman.setup_module())
+        with set_module_args({"update_cache": True, "_ansible_check_mode": True}):
+            P = pacman.Pacman(pacman.setup_module())
 
-        with pytest.raises(AnsibleExitJson) as e:
-            P.run()
+            with pytest.raises(AnsibleExitJson) as e:
+                P.run()
 
         self.mock_run_command.call_count == 0
         out = e.value.args[0]
         assert "packages" not in out
@@ -422,14 +422,14 @@ class TestPacman:
     def test_update_db(self, module_args, expected_calls, changed):
         args = {"update_cache": True}
         args.update(module_args)
-        set_module_args(args)
+        with set_module_args(args):
 
-        self.mock_run_command.side_effect = [
-            (rc, stdout, stderr) for expected_call, kwargs, rc, stdout, stderr in expected_calls
-        ]
-        with pytest.raises(AnsibleExitJson) as e:
-            P = pacman.Pacman(pacman.setup_module())
-            P.run()
+            self.mock_run_command.side_effect = [
+                (rc, stdout, stderr) for expected_call, kwargs, rc, stdout, stderr in expected_calls
+            ]
+            with pytest.raises(AnsibleExitJson) as e:
+                P = pacman.Pacman(pacman.setup_module())
+                P.run()
 
         self.mock_run_command.assert_has_calls([
             mock.call(mock.ANY, expected_call, **kwargs) for expected_call, kwargs, rc, stdout, stderr in expected_calls
@@ -475,16 +475,16 @@ class TestPacman:
         args = {"upgrade": True, "_ansible_check_mode": check_mode_value}
         if upgrade_extra_args:
            args["upgrade_extra_args"] = upgrade_extra_args
-        set_module_args(args)
+        with set_module_args(args):
 
-        if run_command_data and "return_value" in run_command_data:
-            self.mock_run_command.return_value = run_command_data["return_value"]
+            if run_command_data and "return_value" in run_command_data:
+                self.mock_run_command.return_value = run_command_data["return_value"]
 
-        P = pacman.Pacman(pacman.setup_module())
+            P = pacman.Pacman(pacman.setup_module())
 
-        with pytest.raises(AnsibleExitJson) as e:
-            P.run()
-        out = e.value.args[0]
+            with pytest.raises(AnsibleExitJson) as e:
+                P.run()
+            out = e.value.args[0]
 
         if check_mode_value:
             self.mock_run_command.call_count == 0
@@ -499,13 +499,13 @@ class TestPacman:
             assert out["diff"]["before"] and out["diff"]["after"]
 
     def test_upgrade_fail(self, mock_valid_inventory):
-        set_module_args({"upgrade": True})
-        self.mock_run_command.return_value = [1, "stdout", "stderr"]
-        P = pacman.Pacman(pacman.setup_module())
+        with set_module_args({"upgrade": True}):
+            self.mock_run_command.return_value = [1, "stdout", "stderr"]
+            P = pacman.Pacman(pacman.setup_module())
 
-        with pytest.raises(AnsibleFailJson) as e:
-            P.run()
-        self.mock_run_command.call_count == 1
+            with pytest.raises(AnsibleFailJson) as e:
+                P.run()
+            assert self.mock_run_command.call_count == 1
         out = e.value.args[0]
         assert out["failed"]
         assert out["stdout"] == "stdout"
@@ -633,19 +633,19 @@ class TestPacman:
     def test_package_list(
         self, mock_valid_inventory, state, pkg_names, expected, run_command_data, raises
     ):
-        set_module_args({"name": pkg_names, "state": state})
-        P = pacman.Pacman(pacman.setup_module())
-        P.inventory = P._build_inventory()
-        if run_command_data:
-            self.mock_run_command.side_effect = run_command_data["side_effect"]
+        with set_module_args({"name": pkg_names, "state": state}):
+            P = pacman.Pacman(pacman.setup_module())
+            P.inventory = P._build_inventory()
+            if run_command_data:
+                self.mock_run_command.side_effect = run_command_data["side_effect"]
 
-        if raises:
-            with pytest.raises(raises):
-                P.package_list()
-        else:
-            assert sorted(P.package_list()) == sorted(expected)
-        if run_command_data:
-            assert self.mock_run_command.mock_calls == run_command_data["calls"]
+            if raises:
+                with pytest.raises(raises):
+                    P.package_list()
+            else:
+                assert sorted(P.package_list()) == sorted(expected)
+            if run_command_data:
+                assert self.mock_run_command.mock_calls == run_command_data["calls"]
 
     @pytest.mark.parametrize("check_mode_value", [True, False])
     @pytest.mark.parametrize(
@@ -658,11 +658,11 @@ class TestPacman:
     def test_op_packages_nothing_to_do(
        self, mock_valid_inventory, mock_package_list, check_mode_value, name, state, package_list
     ):
-        set_module_args({"name": name, "state": state, "_ansible_check_mode": check_mode_value})
-        mock_package_list.return_value = package_list
-        P = pacman.Pacman(pacman.setup_module())
-        with pytest.raises(AnsibleExitJson) as e:
-            P.run()
+        with set_module_args({"name": name, "state": state, "_ansible_check_mode": check_mode_value}):
+            mock_package_list.return_value = package_list
+            P = pacman.Pacman(pacman.setup_module())
+            with pytest.raises(AnsibleExitJson) as e:
+                P.run()
         out = e.value.args[0]
         assert not out["changed"]
         assert "packages" in out
@@ -1079,13 +1079,13 @@ class TestPacman:
         run_command_data,
         raises,
     ):
-        set_module_args(module_args)
-        self.mock_run_command.side_effect = run_command_data["side_effect"]
-        mock_package_list.return_value = package_list_out
+        with set_module_args(module_args):
+            self.mock_run_command.side_effect = run_command_data["side_effect"]
+            mock_package_list.return_value = package_list_out
 
-        P = pacman.Pacman(pacman.setup_module())
-        with pytest.raises(raises) as e:
-            P.run()
+            P = pacman.Pacman(pacman.setup_module())
+            with pytest.raises(raises) as e:
+                P.run()
 
         out = e.value.args[0]
         assert self.mock_run_command.mock_calls == run_command_data["calls"]