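# Imports assumed by the excerpts below, reconstructed from the patch
# targets and names used in the tests (module paths are a best guess
# against Tavern's layout at the time, not confirmed by this excerpt):
import json
import os
from copy import deepcopy
from unittest.mock import MagicMock, Mock, patch

import paho.mqtt.client as paho
import pytest

from tavern._plugins.mqtt.client import MQTTClient
from tavern.core import run_test
from tavern.util import exceptions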
def test_sleep_after(self, fulltest, mockargs, includes):
    """Should sleep with delay_after in stage spec"""
    fulltest["stages"][0]["delay_after"] = 2

    mock_response = Mock(**mockargs)
    with patch(
        "tavern._plugins.rest.request.requests.Session.request",
        return_value=mock_response,
    ) as pmock:
        with patch("tavern.util.delay.time.sleep") as smock:
            run_test("heif", fulltest, includes)

    assert pmock.called
    smock.assert_called_with(2)
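# The fixtures these tests share (fulltest, mockargs, includes) are not part
# of this excerpt. A minimal sketch of plausible definitions, assuming one
# REST stage that expects a 200 JSON response (URLs and values here are
# illustrative, not Tavern's actual fixtures):
import requests


@pytest.fixture
def fulltest():
    # Hypothetical minimal test spec with a single request/response stage
    return {
        "test_name": "A test with a single stage",
        "stages": [
            {
                "name": "step 1",
                "request": {"url": "http://www.example.com", "method": "GET"},
                "response": {
                    "status_code": 200,
                    "body": {"key": "value"},
                    "headers": {"content-type": "application/json"},
                },
            }
        ],
    }


@pytest.fixture
def mockargs(fulltest):
    # kwargs for Mock() so it behaves like a successful requests.Response
    resp = fulltest["stages"][0]["response"]
    return {
        "spec": requests.Response,
        "status_code": resp["status_code"],
        "json": lambda: resp["body"],
        "headers": resp["headers"],
    }


@pytest.fixture
def includes():
    # Minimal global configuration passed straight through to run_test();
    # the real fixture almost certainly carries more keys
    return {"variables": {}}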
""" Raises error if not defined
"""
mock_response = Mock(**mockargs)
stage_includes = []
newtest = deepcopy(fulltest)
newtest["includes"] = stage_includes
newtest["stages"].insert(0, {"type": "ref", "id": "my_external_stage"})
with pytest.raises(exceptions.InvalidStageReferenceError):
with patch(
"tavern._plugins.rest.request.requests.Session.request",
return_value=mock_response,
):
run_test("heif", newtest, includes)
def test_sleep_before(self, fulltest, mockargs, includes):
    """Should sleep with delay_before in stage spec"""
    fulltest["stages"][0]["delay_before"] = 2

    mock_response = Mock(**mockargs)
    with patch(
        "tavern._plugins.rest.request.requests.Session.request",
        return_value=mock_response,
    ) as pmock:
        with patch("tavern.util.delay.time.sleep") as smock:
            run_test("heif", fulltest, includes)

    assert pmock.called
    smock.assert_called_with(2)
def test_repeats_twice_and_succeeds(self, fulltest, mockargs, includes):
    """Should retry the stage once and pass when the second attempt succeeds"""
    fulltest["stages"][0]["max_retries"] = 1

    failed_mockargs = deepcopy(mockargs)
    failed_mockargs["status_code"] = 400

    mock_responses = [Mock(**failed_mockargs), Mock(**mockargs)]
    with patch(
        "tavern._plugins.rest.request.requests.Session.request",
        side_effect=mock_responses,
    ) as pmock:
        run_test("heif", fulltest, includes)

    assert pmock.call_count == 2
"""
mock_response = Mock(**mockargs)
stage_includes = []
newtest = deepcopy(fulltest)
newtest["includes"] = stage_includes
newtest["stages"].insert(0, {"type": "ref", "id": "my_external_stage"})
includes["stages"] = fake_stages
with patch(
"tavern._plugins.rest.request.requests.Session.request",
return_value=mock_response,
) as pmock:
run_test("heif", newtest, includes)
self.check_mocks_called(pmock)
def test_both_stages(self, fulltest, mockargs, includes, fake_stages):
    """Stages defined in both the test's includes and the global config"""
    # NOTE: the method definition was missing from this excerpt; the name,
    # signature, and docstring are assumed. The FutureWarning suggests that
    # defining stages in a test's own includes is the deprecated path.
    mock_response = Mock(**mockargs)

    stage_includes = [{"stages": fake_stages}]

    newtest = deepcopy(fulltest)
    newtest["includes"] = stage_includes
    newtest["stages"].insert(0, {"type": "ref", "id": "my_external_stage"})

    includes["stages"] = fake_stages

    with pytest.warns(FutureWarning):
        with patch(
            "tavern._plugins.rest.request.requests.Session.request",
            return_value=mock_response,
        ) as pmock:
            run_test("heif", newtest, includes)

    self.check_mocks_called(pmock)
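# `fake_stages` and `check_mocks_called` are referenced above but missing
# from this excerpt. A sketch of the shapes the tests imply: a fixture
# returning one stage whose id matches the "my_external_stage" ref, and a
# helper that checks the mocked session fired (values illustrative; the
# real helper likely also checks the call count):
@pytest.fixture
def fake_stages():
    return [
        {
            "id": "my_external_stage",
            "name": "My external stage",
            "request": {"url": "http://www.example.com", "method": "GET"},
            "response": {"status_code": 200},
        }
    ]


def check_mocks_called(self, pmock):
    # Hypothetical reconstruction of the shared assertion helper
    assert pmock.called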
def test_invalid_body(self, fulltest, mockargs, includes):
    """Wrong body returned"""
    mockargs["json"] = lambda: {"wrong": "thing"}

    mock_response = Mock(**mockargs)
    with patch(
        "tavern._plugins.rest.request.requests.Session.request",
        return_value=mock_response,
    ) as pmock:
        with pytest.raises(exceptions.TestFailError):
            run_test("heif", fulltest, includes)

    assert pmock.called
def test_format_env_keys(self, fulltest, mockargs, includes):
    """Should be able to get variables from the environment and use them in
    test responses"""
    # NOTE: the method definition and the start of the docstring were
    # missing from this excerpt; both are reconstructed.
    env_key = "SPECIAL_CI_MAGIC_COMMIT_TAG"

    fulltest["stages"][0]["request"]["params"] = {
        "a_format_key": "{tavern.env_vars.%s}" % env_key
    }

    mock_response = Mock(**mockargs)
    with patch(
        "tavern._plugins.rest.request.requests.Session.request",
        return_value=mock_response,
    ) as pmock:
        with patch.dict(os.environ, {env_key: "bleuihg"}):
            run_test("heif", fulltest, includes)

    assert pmock.called
def test_mqtt_response(self, fulltest, includes):
    """MQTT stage should pass when the expected message is echoed back"""
    # NOTE: the method definition and the setup of `stage` and `sent` were
    # missing from this excerpt; they are reconstructed under the assumption
    # that the test spec's first stage publishes an MQTT message.
    stage = fulltest["stages"][0]
    sent = stage["mqtt_publish"]["json"]

    mockargs = {
        "spec": paho.MQTTMessage,
        "payload": json.dumps({"echo": sent["message"]}).encode("utf8"),
        "topic": stage["mqtt_publish"]["topic"],
    }
    mock_response = Mock(**mockargs)

    fake_client = MagicMock(
        spec=MQTTClient, message_received=Mock(return_value=mock_response)
    )

    with patch("tavern._plugins.mqtt.client.paho.Client", fake_client), patch(
        "tavern.core.get_extra_sessions", return_value={"paho-mqtt": fake_client}
    ) as pmock:
        run_test("heif", fulltest, includes)

    assert pmock.called
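# A sketch of the mqtt_publish stage shape the test above assumes (key
# names taken from the test body; topic and message values illustrative):
mqtt_stage = {
    "name": "step 1",
    "mqtt_publish": {
        "topic": "/device/123/echo",
        "json": {"message": "hello"},
    },
    "mqtt_response": {
        "topic": "/device/123/echo/response",
        "json": {"echo": "hello"},
    },
}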
def runtest(self):
    # NOTE: the enclosing method definition was missing from this excerpt.
    # This fragment is from Tavern's pytest integration (the YAML test
    # item), not the unit tests above; it relies on load_plugins,
    # verify_tests, and a module-level logger not imported here.
    load_plugins(self.global_cfg)

    # INTERNAL
    # NOTE - now that we can 'mark' tests, we could use pytest.mark.xfail
    # instead. This doesn't differentiate between an error in verification
    # and an error when running the test though.
    xfail = self.spec.get("_xfail", False)

    try:
        verify_tests(self.spec)

        fixture_values = self._load_fixture_values()
        self.global_cfg["variables"].update(fixture_values)

        run_test(self.path, self.spec, self.global_cfg)
    except exceptions.BadSchemaError:
        if xfail == "verify":
            logger.info("xfailing test while verifying schema")
        else:
            raise
    except exceptions.TavernException:
        if xfail == "run":
            logger.info("xfailing test when running")
        else:
            raise
    else:
        if xfail:
            logger.error("Expected test to fail")
            raise exceptions.TestFailError(
                "Expected test to fail at {} stage".format(xfail)
            )
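# A minimal sketch of how the `_xfail` marker handled above might appear in
# a test spec (the "verify" and "run" values come from the branches above;
# the rest of the dict mirrors the fulltest sketch and is illustrative):
xfail_spec = {
    "test_name": "A test expected to fail schema verification",
    "_xfail": "verify",  # or "run" to expect a failure while running
    "stages": [
        {
            "name": "step 1",
            "request": {"url": "http://www.example.com", "method": "GET"},
            "response": {"status_code": 200},
        }
    ],
}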