From bc31b407d1dd13a9860ea3d7e3648e14977231c1 Mon Sep 17 00:00:00 2001 From: yuxuan-ms Date: Fri, 31 May 2024 18:52:55 +0800 Subject: [PATCH] Make multitest env start/stop failure more explicit for xfail. --- .../2899_changed.env_report_explicit.rst | 1 + testplan/common/entity/base.py | 2 +- testplan/runnable/interactive/base.py | 1 - testplan/testing/base.py | 231 +++++++++++---- testplan/testing/multitest/base.py | 26 +- .../testplan/exporters/testing/test_json.py | 5 +- .../reports/basic_run_case_test1.data | 127 ++++++++- .../reports/basic_run_suite_test2.data | 127 ++++++++- .../interactive/reports/basic_top_level.data | 266 ++++++++++++++++-- .../reports/basic_top_level_reset.data | 206 +++++++++++++- .../testplan/runnable/interactive/test_api.py | 25 +- .../runnable/interactive/test_interactive.py | 10 +- .../testing/fixtures/base/passing/report.py | 49 ++++ .../multitest/test_error_handler_hook.py | 47 ++-- .../multitest/test_multitest_drivers.py | 25 +- .../testing/multitest/test_pre_post_steps.py | 28 +- .../testplan/testing/multitest/test_xfail.py | 2 +- .../functional/testplan/testing/test_base.py | 2 +- .../runnable/interactive/test_irunner.py | 69 +++-- 19 files changed, 1022 insertions(+), 227 deletions(-) create mode 100755 doc/newsfragments/2899_changed.env_report_explicit.rst diff --git a/doc/newsfragments/2899_changed.env_report_explicit.rst b/doc/newsfragments/2899_changed.env_report_explicit.rst new file mode 100755 index 000000000..8a420f6f0 --- /dev/null +++ b/doc/newsfragments/2899_changed.env_report_explicit.rst @@ -0,0 +1 @@ +Add 'Environment Start' and 'Environment Stop' entries to Multitest report. \ No newline at end of file diff --git a/testplan/common/entity/base.py b/testplan/common/entity/base.py index 36f4e6076..db7637a25 100644 --- a/testplan/common/entity/base.py +++ b/testplan/common/entity/base.py @@ -901,7 +901,7 @@ def __init__(self, **options): self._ihandler = None @property - def resources(self): + def resources(self) -> Environment: """ Returns the :py:class:`Environment ` diff --git a/testplan/runnable/interactive/base.py b/testplan/runnable/interactive/base.py index f37871d42..50a812956 100644 --- a/testplan/runnable/interactive/base.py +++ b/testplan/runnable/interactive/base.py @@ -487,7 +487,6 @@ def start_test_resources(self, test_uid, await_results=True): """ if not await_results: return self._run_async(self.start_test_resources, test_uid) - self._set_env_status(test_uid, entity.ResourceStatus.STARTING) if self.report[test_uid].status_override == Status.ERROR: self._clear_env_errors(test_uid) diff --git a/testplan/testing/base.py b/testplan/testing/base.py index 50a008a4d..91bfb913c 100644 --- a/testplan/testing/base.py +++ b/testplan/testing/base.py @@ -37,6 +37,7 @@ subprocess_popen, ) from testplan.common.utils.timing import format_duration, parse_duration +from testplan.common.report.base import Status as ReportStatus from testplan.report import ( RuntimeStatus, ReportCategories, @@ -58,11 +59,20 @@ ASSERTION_INDENT = 8 -class ResourceHooks(Enum): - before_start = "Before Start" - after_start = "After Start" - before_stop = "Before Stop" - after_stop = "After Stop" +class ResourceHooks(str, Enum): + ENVIRONMENT_START = "Environment Start" + ENVIRONMENT_STOP = "Environment Stop" + ERROR_HANDLER = "Error Handler" + STARTING = "Starting" + STOPPING = "Stopping" + + BEFORE_START = "Before Start" + AFTER_START = "After Start" + BEFORE_STOP = "Before Stop" + AFTER_STOP = "After Stop" + + def __str__(self) -> str: + return self.value def 
_test_name_sanity_check(name: str) -> bool: @@ -237,15 +247,15 @@ def _new_test_report(self) -> TestGroupReport: env_status=ResourceStatus.STOPPED, ) - def _init_test_report(self) -> TestGroupReport: + def _init_test_report(self) -> None: self.result.report = self._new_test_report() def get_tags_index(self) -> Union[str, Iterable[str], Dict]: """ Return the tag index that will be used for filtering. - By default this is equal to the native tags for this object. + By default, this is equal to the native tags for this object. - However subclasses may build larger tag indices + However, subclasses may build larger tag indices by collecting tags from their children for example. """ return self.cfg.tags or {} @@ -433,6 +443,65 @@ def _set_dependencies(self) -> None: if deps: self.resources.set_dependency(deps) + def _start_resource(self) -> None: + if len(self.resources) == 0: + return + case_report = self._create_case_or_override( + ResourceHooks.ENVIRONMENT_START.value, ResourceHooks.STARTING + ) + case_result = self.cfg.result( + stdout_style=self.stdout_style, _scratch=self.scratch + ) + self.resources.start() + for uid, driver in self.resources.items(): + case_result.log(f"{driver} Status: {driver.status.tag}") + + case_report.extend(case_result.serialized_entries) + case_report.attachments.extend(case_result.attachments) + case_report.pass_if_empty() + if self.resources.start_exceptions: + for msg in self.resources.start_exceptions.values(): + case_report.logger.error(msg) + case_report.status_override = ReportStatus.ERROR + case_report.runtime_status = RuntimeStatus.NOT_RUN + else: + case_report.runtime_status = RuntimeStatus.FINISHED + pattern = f"{self.name}:{ResourceHooks.ENVIRONMENT_START}:{ResourceHooks.STARTING}" + self._xfail(pattern, case_report) + + def _stop_resource(self, is_reversed=True) -> None: + if len(self.resources) == 0: + return + case_report = self._create_case_or_override( + ResourceHooks.ENVIRONMENT_STOP.value, ResourceHooks.STOPPING.value + ) + case_result = self.cfg.result( + stdout_style=self.stdout_style, _scratch=self.scratch + ) + self.resources.stop(is_reversed=is_reversed) + case_report.extend(case_result.serialized_entries) + case_report.attachments.extend(case_result.attachments) + case_report.pass_if_empty() + if self.resources.stop_exceptions: + for msg in self.resources.stop_exceptions.values(): + case_report.logger.error(msg) + case_report.status_override = ReportStatus.ERROR + drivers = set(self.resources.start_exceptions.keys()) + drivers.update(self.resources.stop_exceptions.keys()) + for driver in drivers: + if driver.cfg.report_errors_from_logs: + error_log = os.linesep.join(driver.fetch_error_log()) + if error_log: + case_report.logger.error(error_log) + pattern = f"{self.name}:{ResourceHooks.ENVIRONMENT_STOP}:{ResourceHooks.STOPPING}" + self._xfail(pattern, case_report) + + def _finish_resource_report(self, suite_name): + if self.result.report.has_uid(suite_name): + self.result.report[ + suite_name + ].runtime_status = RuntimeStatus.FINISHED + def add_pre_resource_steps(self) -> None: """Runnable steps to be executed before environment starts.""" self._add_step(self.timer.start, "setup") @@ -444,30 +513,42 @@ def add_start_resource_steps(self) -> None: self._add_step( self._run_resource_hook, hook=self.cfg.before_start, - label=ResourceHooks.before_start.value, + hook_name=ResourceHooks.BEFORE_START.value, + suite_name=ResourceHooks.ENVIRONMENT_START.value, ) - self._add_step(self.resources.start) + self._add_step(self._start_resource) 
self._add_step( self._run_resource_hook, hook=self.cfg.after_start, - label=ResourceHooks.after_start.value, + hook_name=ResourceHooks.AFTER_START.value, + suite_name=ResourceHooks.ENVIRONMENT_START.value, + ) + self._add_step( + self._finish_resource_report, + suite_name=ResourceHooks.ENVIRONMENT_START.value, ) def add_stop_resource_steps(self) -> None: self._add_step( self._run_resource_hook, hook=self.cfg.before_stop, - label=ResourceHooks.before_stop.value, + hook_name=ResourceHooks.BEFORE_STOP.value, + suite_name=ResourceHooks.ENVIRONMENT_STOP.value, ) - - self._add_step(self.resources.stop, is_reversed=True) + self._add_step(self._stop_resource, is_reversed=True) self._add_step( self._run_resource_hook, hook=self.cfg.after_stop, - label=ResourceHooks.after_stop.value, + hook_name=ResourceHooks.AFTER_STOP.value, + suite_name=ResourceHooks.ENVIRONMENT_STOP.value, + ) + + self._add_step( + self._finish_resource_report, + suite_name=ResourceHooks.ENVIRONMENT_STOP.value, ) def add_pre_main_steps(self) -> None: @@ -554,9 +635,41 @@ def _run_error_handler(self) -> None: """ if self.cfg.error_handler: - self._run_resource_hook(self.cfg.error_handler, "Error handler") + self._run_resource_hook( + self.cfg.error_handler, + self.cfg.error_handler.__name__, + ResourceHooks.ERROR_HANDLER, + ) - def _run_resource_hook(self, hook: Callable, label: str) -> None: + def _get_suite_or_create(self, suite_name: str) -> TestGroupReport: + if self.result.report.has_uid(suite_name): + suite_report = self.result.report[suite_name] + else: + suite_report = TestGroupReport( + name=suite_name, + category=ReportCategories.SYNTHESIZED, + ) + self.result.report.append(suite_report) + return suite_report + + def _create_case_or_override( + self, suite_name: str, case_name: str, description: str = "" + ) -> TestCaseReport: + suite_report = self._get_suite_or_create(suite_name) + case_report = TestCaseReport( + name=case_name, + description=description, + category=ReportCategories.SYNTHESIZED, + ) + if suite_report.has_uid(case_name): + suite_report.set_by_uid(case_name, case_report) + else: + suite_report.append(case_report) + return case_report + + def _run_resource_hook( + self, hook: Callable, hook_name: str, suite_name: str + ) -> None: # TODO: env or env, result signature is mandatory not an "if" """ This method runs post/pre_start/stop hooks. User can optionally make @@ -567,26 +680,18 @@ def _run_resource_hook(self, hook: Callable, label: str) -> None: meaning that if something goes wrong we will have the stack trace in the final report. 
""" - if not hook: return - suite_report = TestGroupReport( - name=label, - category=ReportCategories.SYNTHESIZED, + case_report = self._create_case_or_override( + suite_name, hook_name, description=strings.get_docstring(hook) ) - case_report = TestCaseReport( - name=hook.__name__, - description=strings.get_docstring(hook), - category=ReportCategories.SYNTHESIZED, - ) - suite_report.append(case_report) case_result = self.cfg.result( stdout_style=self.stdout_style, _scratch=self.scratch ) runtime_env = self._get_runtime_environment( - testcase_name=hook.__name__, + testcase_name=hook_name, testcase_report=case_report, ) try: @@ -595,7 +700,6 @@ def _run_resource_hook(self, hook: Callable, label: str) -> None: except interface.MethodSignatureMismatch: interface.check_signature(hook, ["env"]) hook_args = (runtime_env,) - with compose_contexts(*self._get_hook_context(case_report)): hook(*hook_args) @@ -606,31 +710,18 @@ def _run_resource_hook(self, hook: Callable, label: str) -> None: self.log_testcase_status(case_report) case_report.pass_if_empty() - pattern = ":".join([self.name, label, hook.__name__]) + pattern = ":".join([self.name, suite_name, hook_name]) self._xfail(pattern, case_report) case_report.runtime_status = RuntimeStatus.FINISHED - suite_report.runtime_status = RuntimeStatus.FINISHED - - if self.result.report.has_uid(label): - self.result.report[label] = suite_report - else: - self.result.report.append(suite_report) - - def _dry_run_resource_hook(self, hook: Callable, label: str) -> None: + def _dry_run_resource_hook( + self, hook: Callable, hook_name: str, suite_name: str + ) -> None: if not hook: return - - suite_report = TestGroupReport( - name=label, - category=ReportCategories.SYNTHESIZED, + self._create_case_or_override( + suite_name, hook_name, description=strings.get_docstring(hook) ) - case_report = TestCaseReport( - name=hook.__name__, - category=ReportCategories.SYNTHESIZED, - ) - suite_report.append(case_report) - self.result.report.append(suite_report) def _dry_run_testsuites(self) -> None: suites_to_run = self.test_context @@ -647,7 +738,7 @@ def _dry_run_testsuites(self) -> None: self.result.report.append(testsuite_report) - def dry_run(self) -> None: + def dry_run(self) -> RunnableResult: """ Return an empty report skeleton for this test including all testsuites, testcases etc. hierarchy. Does not run any tests. 
@@ -655,19 +746,45 @@ def dry_run(self) -> None: self.result.report = self._new_test_report() - for hook, label in ( - (self.cfg.before_start, ResourceHooks.before_start.value), - (self.cfg.after_start, ResourceHooks.after_start.value), + for hook, hook_name, suite_name in ( + ( + self.cfg.before_start, + ResourceHooks.BEFORE_START.value, + ResourceHooks.ENVIRONMENT_START.value, + ), + ( + (lambda: None) if self.cfg.environment else None, + ResourceHooks.STARTING.value, + ResourceHooks.ENVIRONMENT_START.value, + ), + ( + self.cfg.after_start, + ResourceHooks.AFTER_START.value, + ResourceHooks.ENVIRONMENT_START.value, + ), ): - self._dry_run_resource_hook(hook, label) + self._dry_run_resource_hook(hook, hook_name, suite_name) self._dry_run_testsuites() - for hook, label in ( - (self.cfg.before_stop, ResourceHooks.before_stop.value), - (self.cfg.after_stop, ResourceHooks.after_stop.value), + for hook, hook_name, suite_name in ( + ( + self.cfg.before_stop, + ResourceHooks.BEFORE_STOP.value, + ResourceHooks.ENVIRONMENT_STOP.value, + ), + ( + (lambda: None) if self.cfg.environment else None, + ResourceHooks.STOPPING.value, + ResourceHooks.ENVIRONMENT_STOP.value, + ), + ( + self.cfg.after_stop, + ResourceHooks.AFTER_STOP.value, + ResourceHooks.ENVIRONMENT_STOP.value, + ), ): - self._dry_run_resource_hook(hook, label) + self._dry_run_resource_hook(hook, hook_name, suite_name) return self.result diff --git a/testplan/testing/multitest/base.py b/testplan/testing/multitest/base.py index e322f32f4..6d1317937 100644 --- a/testplan/testing/multitest/base.py +++ b/testplan/testing/multitest/base.py @@ -581,8 +581,9 @@ def skip_step(self, step) -> bool: or self._get_error_logs() ) elif step in ( - self.resources.start, - self.resources.stop, + self._start_resource, + self._stop_resource, + self._finish_resource_report, self.apply_xfail_tests, ): return False @@ -591,27 +592,6 @@ def skip_step(self, step) -> bool: return True return False - def post_step_call(self, step): - """Callable to be executed after each step.""" - exceptions = None - if step == self.resources.start: - exceptions = self.resources.start_exceptions - elif step == self.resources.stop: - exceptions = self.resources.stop_exceptions - if exceptions: - for msg in exceptions.values(): - self.result.report.logger.error(msg) - self.result.report.status_override = Status.ERROR - - if step == self.resources.stop: - drivers = set(self.resources.start_exceptions.keys()) - drivers.update(self.resources.stop_exceptions.keys()) - for driver in drivers: - if driver.cfg.report_errors_from_logs: - error_log = os.linesep.join(driver.fetch_error_log()) - if error_log: - self.result.report.logger.error(error_log) - def add_pre_resource_steps(self): """Runnable steps to be executed before environment starts.""" diff --git a/tests/functional/testplan/exporters/testing/test_json.py b/tests/functional/testplan/exporters/testing/test_json.py index 7390c7deb..444e59e2c 100644 --- a/tests/functional/testplan/exporters/testing/test_json.py +++ b/tests/functional/testplan/exporters/testing/test_json.py @@ -188,7 +188,8 @@ def test_json_exporter_generating_split_report(runpath): assert ( len(structure[1]["entries"]) == 2 ) # one suite in 2nd multitest, 1 synthesized - assert structure[1]["entries"][0]["name"] == "After Start" + assert structure[1]["entries"][0]["name"] == "Environment Start" + assert structure[1]["entries"][0]["entries"][0]["name"] == "After Start" assert len(structure[1]["entries"][0]["entries"]) == 1 assert structure[1]["entries"][1]["name"] == 
"Beta" # 1st suite name assert len(structure[1]["entries"][1]["entries"]) == 2 # 2 testcases @@ -207,7 +208,7 @@ def test_json_exporter_generating_split_report(runpath): assert len(assertions["test_error"]) == 0 # 2 assertions in synthesized cases, i.e. custom hooks assert assertions["setup"][0]["type"] == "Log" - assert assertions["secondary_after_start"][0]["type"] == "Log" + assert assertions["After Start"][0]["type"] == "Log" def test_implicit_exporter_initialization(runpath): diff --git a/tests/functional/testplan/runnable/interactive/reports/basic_run_case_test1.data b/tests/functional/testplan/runnable/interactive/reports/basic_run_case_test1.data index c4026001b..8a2da3044 100644 --- a/tests/functional/testplan/runnable/interactive/reports/basic_run_case_test1.data +++ b/tests/functional/testplan/runnable/interactive/reports/basic_run_case_test1.data @@ -18,9 +18,61 @@ "passed": 0, "total": 0 }, - "definition_name": "After Start", + "definition_name": "Environment Start", "description": null, "entries": [ + { + "name": "Starting", + "definition_name": "Starting", + "counter": { + "passed": 1, + "failed": 0, + "total": 1 + }, + "status_override": null, + "category": "synthesized", + "runtime_status": "finished", + "status": "passed", + "logs": [], + "timer": {}, + "description": "", + "status_reason": null, + "type": "TestCaseReport", + "parent_uids": [ + "InteractivePlan", + "Test1", + "Environment Start" + ], + "entries": [ + { + "category": "DEFAULT", + "description": "TCPServer[server] Status: STARTED", + "file_path": "", + "flag": "DEFAULT", + "line_no": 0, + "machine_time": "", + "message": "TCPServer[server] Status: STARTED", + "meta_type": "entry", + "type": "Log", + "utc_time": "" + }, + { + "category": "DEFAULT", + "description": "TCPClient[client] Status: STARTED", + "file_path": "", + "flag": "DEFAULT", + "line_no": 0, + "machine_time": "", + "message": "TCPClient[client] Status: STARTED", + "meta_type": "entry", + "type": "Log", + "utc_time": "" + } + ], + "hash": 0, + "uid": "Starting", + "tags": {} + }, { "category": "synthesized", "counter": { @@ -28,16 +80,16 @@ "passed": 1, "total": 1 }, - "definition_name": "accept_connection", + "definition_name": "After Start", "description": null, "entries": [], "hash": 0, "logs": [], - "name": "accept_connection", + "name": "After Start", "parent_uids": [ "InteractivePlan", "Test1", - "After Start" + "Environment Start" ], "runtime_status": "finished", "status": "passed", @@ -46,7 +98,7 @@ "tags": {}, "timer": {}, "type": "TestCaseReport", - "uid": "accept_connection" + "uid": "After Start" } ], "env_status": null, @@ -54,7 +106,7 @@ "hash": 0, "host": null, "logs": [], - "name": "After Start", + "name": "Environment Start", "parent_uids": [ "InteractivePlan", "Test1" @@ -68,7 +120,7 @@ "tags": {}, "timer": {}, "type": "TestGroupReport", - "uid": "After Start" + "uid": "Environment Start" }, { "category": "testsuite", @@ -366,6 +418,67 @@ "timer": {}, "type": "TestGroupReport", "uid": "Custom_1" + }, + { + "type": "TestGroupReport", + "runtime_status": "ready", + "parent_uids": [ + "InteractivePlan", + "Test1" + ], + "category": "synthesized", + "hash": 0, + "tags": {}, + "uid": "Environment Stop", + "fix_spec_path": null, + "entries": [ + { + "status_reason": null, + "entries": [], + "type": "TestCaseReport", + "runtime_status": "ready", + "parent_uids": [ + "InteractivePlan", + "Test1", + "Environment Stop" + ], + "category": "synthesized", + "timer": {}, + "hash": 0, + "definition_name": "Stopping", + "uid": "Stopping", + 
"counter": { + "passed": 0, + "failed": 0, + "total": 1, + "unknown": 1 + }, + "status_override": null, + "tags": {}, + "name": "Stopping", + "status": "unknown", + "description": null, + "logs": [] + } + ], + "counter": { + "passed": 0, + "failed": 0, + "total": 0 + }, + "status_override": null, + "children": [], + "status": "unknown", + "description": null, + "status_reason": null, + "host": null, + "name": "Environment Stop", + "strict_order": false, + "env_status": null, + "part": null, + "timer": {}, + "definition_name": "Environment Stop", + "logs": [] } ], "env_status": "STARTED", diff --git a/tests/functional/testplan/runnable/interactive/reports/basic_run_suite_test2.data b/tests/functional/testplan/runnable/interactive/reports/basic_run_suite_test2.data index 3c7afafb6..2baa0438f 100644 --- a/tests/functional/testplan/runnable/interactive/reports/basic_run_suite_test2.data +++ b/tests/functional/testplan/runnable/interactive/reports/basic_run_suite_test2.data @@ -18,9 +18,61 @@ "passed": 0, "total": 0 }, - "definition_name": "After Start", + "definition_name": "Environment Start", "description": null, "entries": [ + { + "name": "Starting", + "definition_name": "Starting", + "counter": { + "passed": 1, + "failed": 0, + "total": 1 + }, + "status_override": null, + "category": "synthesized", + "runtime_status": "finished", + "status": "passed", + "logs": [], + "timer": {}, + "description": "", + "status_reason": null, + "type": "TestCaseReport", + "parent_uids": [ + "InteractivePlan", + "Test2", + "Environment Start" + ], + "entries": [ + { + "category": "DEFAULT", + "description": "TCPServer[server] Status: STARTED", + "file_path": "", + "flag": "DEFAULT", + "line_no": 0, + "machine_time": "", + "message": "TCPServer[server] Status: STARTED", + "meta_type": "entry", + "type": "Log", + "utc_time": "" + }, + { + "category": "DEFAULT", + "description": "TCPClient[client] Status: STARTED", + "file_path": "", + "flag": "DEFAULT", + "line_no": 0, + "machine_time": "", + "message": "TCPClient[client] Status: STARTED", + "meta_type": "entry", + "type": "Log", + "utc_time": "" + } + ], + "hash": 0, + "uid": "Starting", + "tags": {} + }, { "category": "synthesized", "counter": { @@ -28,16 +80,16 @@ "passed": 1, "total": 1 }, - "definition_name": "accept_connection", + "definition_name": "After Start", "description": null, "entries": [], "hash": 0, "logs": [], - "name": "accept_connection", + "name": "After Start", "parent_uids": [ "InteractivePlan", "Test2", - "After Start" + "Environment Start" ], "runtime_status": "finished", "status": "passed", @@ -46,7 +98,7 @@ "tags": {}, "timer": {}, "type": "TestCaseReport", - "uid": "accept_connection" + "uid": "After Start" } ], "env_status": null, @@ -54,7 +106,7 @@ "hash": 0, "host": null, "logs": [], - "name": "After Start", + "name": "Environment Start", "parent_uids": [ "InteractivePlan", "Test2" @@ -68,7 +120,7 @@ "tags": {}, "timer": {}, "type": "TestGroupReport", - "uid": "After Start" + "uid": "Environment Start" }, { "category": "testsuite", @@ -349,6 +401,67 @@ "timer": {}, "type": "TestGroupReport", "uid": "Custom_1" + }, + { + "type": "TestGroupReport", + "runtime_status": "ready", + "parent_uids": [ + "InteractivePlan", + "Test2" + ], + "category": "synthesized", + "hash": 0, + "tags": {}, + "uid": "Environment Stop", + "fix_spec_path": null, + "entries": [ + { + "status_reason": null, + "entries": [], + "type": "TestCaseReport", + "runtime_status": "ready", + "parent_uids": [ + "InteractivePlan", + "Test2", + "Environment Stop" + ], + 
"category": "synthesized", + "timer": {}, + "hash": 0, + "definition_name": "Stopping", + "uid": "Stopping", + "counter": { + "passed": 0, + "failed": 0, + "total": 1, + "unknown": 1 + }, + "status_override": null, + "tags": {}, + "name": "Stopping", + "status": "unknown", + "description": null, + "logs": [] + } + ], + "counter": { + "passed": 0, + "failed": 0, + "total": 0 + }, + "status_override": null, + "children": [], + "status": "unknown", + "description": null, + "status_reason": null, + "host": null, + "name": "Environment Stop", + "strict_order": false, + "env_status": null, + "part": null, + "timer": {}, + "definition_name": "Environment Stop", + "logs": [] } ], "env_status": "STARTED", diff --git a/tests/functional/testplan/runnable/interactive/reports/basic_top_level.data b/tests/functional/testplan/runnable/interactive/reports/basic_top_level.data index d8b182999..f177bd933 100644 --- a/tests/functional/testplan/runnable/interactive/reports/basic_top_level.data +++ b/tests/functional/testplan/runnable/interactive/reports/basic_top_level.data @@ -40,9 +40,61 @@ "passed": 0, "total": 0 }, - "definition_name": "After Start", + "definition_name": "Environment Start", "description": null, "entries": [ + { + "name": "Starting", + "definition_name": "Starting", + "counter": { + "passed": 1, + "failed": 0, + "total": 1 + }, + "status_override": null, + "category": "synthesized", + "runtime_status": "finished", + "status": "passed", + "logs": [], + "timer": {}, + "description": "", + "status_reason": null, + "type": "TestCaseReport", + "parent_uids": [ + "InteractivePlan", + "Test1", + "Environment Start" + ], + "entries": [ + { + "category": "DEFAULT", + "description": "TCPServer[server] Status: STARTED", + "file_path": "", + "flag": "DEFAULT", + "line_no": 0, + "machine_time": "", + "message": "TCPServer[server] Status: STARTED", + "meta_type": "entry", + "type": "Log", + "utc_time": "" + }, + { + "category": "DEFAULT", + "description": "TCPClient[client] Status: STARTED", + "file_path": "", + "flag": "DEFAULT", + "line_no": 0, + "machine_time": "", + "message": "TCPClient[client] Status: STARTED", + "meta_type": "entry", + "type": "Log", + "utc_time": "" + } + ], + "hash": 0, + "uid": "Starting", + "tags": {} + }, { "category": "synthesized", "counter": { @@ -50,16 +102,16 @@ "passed": 1, "total": 1 }, - "definition_name": "accept_connection", + "definition_name": "After Start", "description": null, "entries": [], "hash": 0, "logs": [], - "name": "accept_connection", + "name": "After Start", "parent_uids": [ "InteractivePlan", "Test1", - "After Start" + "Environment Start" ], "runtime_status": "finished", "status": "passed", @@ -68,7 +120,7 @@ "tags": {}, "timer": {}, "type": "TestCaseReport", - "uid": "accept_connection" + "uid": "After Start" } ], "env_status": null, @@ -76,7 +128,7 @@ "hash": 0, "host": null, "logs": [], - "name": "After Start", + "name": "Environment Start", "parent_uids": [ "InteractivePlan", "Test1" @@ -90,7 +142,7 @@ "tags": {}, "timer": {}, "type": "TestGroupReport", - "uid": "After Start" + "uid": "Environment Start" }, { "category": "testsuite", @@ -486,6 +538,67 @@ "timer": {}, "type": "TestGroupReport", "uid": "Custom_1" + }, + { + "definition_name": "Environment Stop", + "children": [], + "status": "unknown", + "description": null, + "status_reason": null, + "type": "TestGroupReport", + "parent_uids": [ + "InteractivePlan", + "Test1" + ], + "fix_spec_path": null, + "part": null, + "tags": {}, + "logs": [], + "uid": "Environment Stop", + "name": 
"Environment Stop", + "host": null, + "status_override": null, + "category": "synthesized", + "runtime_status": "ready", + "timer": {}, + "strict_order": false, + "counter": { + "passed": 0, + "failed": 0, + "total": 0 + }, + "entries": [ + { + "name": "Stopping", + "definition_name": "Stopping", + "counter": { + "passed": 0, + "failed": 0, + "total": 1, + "unknown": 1 + }, + "status_override": null, + "category": "synthesized", + "runtime_status": "ready", + "status": "unknown", + "logs": [], + "timer": {}, + "description": null, + "status_reason": null, + "type": "TestCaseReport", + "parent_uids": [ + "InteractivePlan", + "Test1", + "Environment Stop" + ], + "entries": [], + "hash": 0, + "uid": "Stopping", + "tags": {} + } + ], + "env_status": null, + "hash": 0 } ], "env_status": "STARTED", @@ -498,8 +611,8 @@ "InteractivePlan" ], "part": null, - "runtime_status": "finished", - "status": "passed", + "runtime_status": "ready", + "status": "unknown", "status_override": null, "status_reason": null, "strict_order": false, @@ -527,9 +640,61 @@ "passed": 0, "total": 0 }, - "definition_name": "After Start", + "definition_name": "Environment Start", "description": null, "entries": [ + { + "name": "Starting", + "definition_name": "Starting", + "counter": { + "passed": 1, + "failed": 0, + "total": 1 + }, + "status_override": null, + "category": "synthesized", + "runtime_status": "finished", + "status": "passed", + "logs": [], + "timer": {}, + "description": "", + "status_reason": null, + "type": "TestCaseReport", + "parent_uids": [ + "InteractivePlan", + "Test2", + "Environment Start" + ], + "entries": [ + { + "category": "DEFAULT", + "description": "TCPServer[server] Status: STARTED", + "file_path": "", + "flag": "DEFAULT", + "line_no": 0, + "machine_time": "", + "message": "TCPServer[server] Status: STARTED", + "meta_type": "entry", + "type": "Log", + "utc_time": "" + }, + { + "category": "DEFAULT", + "description": "TCPClient[client] Status: STARTED", + "file_path": "", + "flag": "DEFAULT", + "line_no": 0, + "machine_time": "", + "message": "TCPClient[client] Status: STARTED", + "meta_type": "entry", + "type": "Log", + "utc_time": "" + } + ], + "hash": 0, + "uid": "Starting", + "tags": {} + }, { "category": "synthesized", "counter": { @@ -537,16 +702,16 @@ "passed": 1, "total": 1 }, - "definition_name": "accept_connection", + "definition_name": "After Start", "description": null, "entries": [], "hash": 0, "logs": [], - "name": "accept_connection", + "name": "After Start", "parent_uids": [ "InteractivePlan", "Test2", - "After Start" + "Environment Start" ], "runtime_status": "finished", "status": "passed", @@ -555,7 +720,7 @@ "tags": {}, "timer": {}, "type": "TestCaseReport", - "uid": "accept_connection" + "uid": "After Start" } ], "env_status": null, @@ -563,7 +728,7 @@ "hash": 0, "host": null, "logs": [], - "name": "After Start", + "name": "Environment Start", "parent_uids": [ "InteractivePlan", "Test2" @@ -577,7 +742,7 @@ "tags": {}, "timer": {}, "type": "TestGroupReport", - "uid": "After Start" + "uid": "Environment Start" }, { "category": "testsuite", @@ -973,6 +1138,67 @@ "timer": {}, "type": "TestGroupReport", "uid": "Custom_1" + }, + { + "definition_name": "Environment Stop", + "children": [], + "status": "unknown", + "description": null, + "status_reason": null, + "type": "TestGroupReport", + "parent_uids": [ + "InteractivePlan", + "Test2" + ], + "fix_spec_path": null, + "part": null, + "tags": {}, + "logs": [], + "uid": "Environment Stop", + "name": "Environment Stop", + "host": null, 
+ "status_override": null, + "category": "synthesized", + "runtime_status": "ready", + "timer": {}, + "strict_order": false, + "counter": { + "passed": 0, + "failed": 0, + "total": 0 + }, + "entries": [ + { + "name": "Stopping", + "definition_name": "Stopping", + "counter": { + "passed": 0, + "failed": 0, + "total": 1, + "unknown": 1 + }, + "status_override": null, + "category": "synthesized", + "runtime_status": "ready", + "status": "unknown", + "logs": [], + "timer": {}, + "description": null, + "status_reason": null, + "type": "TestCaseReport", + "parent_uids": [ + "InteractivePlan", + "Test2", + "Environment Stop" + ], + "entries": [], + "hash": 0, + "uid": "Stopping", + "tags": {} + } + ], + "env_status": null, + "hash": 0 } ], "env_status": "STARTED", @@ -985,8 +1211,8 @@ "InteractivePlan" ], "part": null, - "runtime_status": "finished", - "status": "passed", + "runtime_status": "ready", + "status": "unknown", "status_override": null, "status_reason": null, "strict_order": false, @@ -1004,8 +1230,8 @@ "name": "InteractivePlan", "parent_uids": [], "resource_meta_path": null, - "runtime_status": "finished", - "status": "passed", + "runtime_status": "ready", + "status": "unknown", "status_override": null, "status_reason": null, "tags_index": {}, diff --git a/tests/functional/testplan/runnable/interactive/reports/basic_top_level_reset.data b/tests/functional/testplan/runnable/interactive/reports/basic_top_level_reset.data index ff8589a17..4111e6a0e 100644 --- a/tests/functional/testplan/runnable/interactive/reports/basic_top_level_reset.data +++ b/tests/functional/testplan/runnable/interactive/reports/basic_top_level_reset.data @@ -31,9 +31,37 @@ "passed": 0, "total": 0 }, - "definition_name": "After Start", + "definition_name": "Environment Start", "description": null, "entries": [ + { + "category": "synthesized", + "counter": { + "passed": 0, + "failed": 0, + "total": 1, + "unknown": 1 + }, + "definition_name": "Starting", + "description": null, + "entries": [], + "hash": 0, + "logs": [], + "name": "Starting", + "parent_uids": [ + "InteractivePlan", + "Test1", + "Environment Start" + ], + "runtime_status": "ready", + "status": "unknown", + "status_override": null, + "status_reason": null, + "tags": {}, + "timer": {}, + "type": "TestCaseReport", + "uid": "Starting" + }, { "category": "synthesized", "counter": { @@ -42,16 +70,16 @@ "total": 1, "unknown": 1 }, - "definition_name": "accept_connection", + "definition_name": "After Start", "description": null, "entries": [], "hash": 0, "logs": [], - "name": "accept_connection", + "name": "After Start", "parent_uids": [ "InteractivePlan", "Test1", - "After Start" + "Environment Start" ], "runtime_status": "ready", "status": "unknown", @@ -60,7 +88,7 @@ "tags": {}, "timer": {}, "type": "TestCaseReport", - "uid": "accept_connection" + "uid": "After Start" } ], "env_status": null, @@ -68,7 +96,7 @@ "hash": 0, "host": null, "logs": [], - "name": "After Start", + "name": "Environment Start", "parent_uids": [ "InteractivePlan", "Test1" @@ -82,7 +110,7 @@ "tags": {}, "timer": {}, "type": "TestGroupReport", - "uid": "After Start" + "uid": "Environment Start" }, { "category": "testsuite", @@ -363,6 +391,67 @@ "timer": {}, "type": "TestGroupReport", "uid": "Custom_1" + }, + { + "definition_name": "Environment Stop", + "children": [], + "status": "unknown", + "description": null, + "status_reason": null, + "type": "TestGroupReport", + "parent_uids": [ + "InteractivePlan", + "Test1" + ], + "fix_spec_path": null, + "part": null, + "tags": {}, + "logs": 
[], + "uid": "Environment Stop", + "name": "Environment Stop", + "host": null, + "status_override": null, + "category": "synthesized", + "runtime_status": "ready", + "timer": {}, + "strict_order": false, + "counter": { + "passed": 0, + "failed": 0, + "total": 0 + }, + "entries": [ + { + "name": "Stopping", + "definition_name": "Stopping", + "counter": { + "passed": 0, + "failed": 0, + "total": 1, + "unknown": 1 + }, + "status_override": null, + "category": "synthesized", + "runtime_status": "ready", + "status": "unknown", + "logs": [], + "timer": {}, + "description": null, + "status_reason": null, + "type": "TestCaseReport", + "parent_uids": [ + "InteractivePlan", + "Test1", + "Environment Stop" + ], + "entries": [], + "hash": 0, + "uid": "Stopping", + "tags": {} + } + ], + "env_status": null, + "hash": 0 } ], "env_status": "STOPPED", @@ -405,9 +494,37 @@ "passed": 0, "total": 0 }, - "definition_name": "After Start", + "definition_name": "Environment Start", "description": null, "entries": [ + { + "entries": [], + "runtime_status": "ready", + "name": "Starting", + "tags": {}, + "category": "synthesized", + "status_reason": null, + "parent_uids": [ + "InteractivePlan", + "Test2", + "Environment Start" + ], + "definition_name": "Starting", + "description": null, + "status_override": null, + "status": "unknown", + "logs": [], + "hash": 0, + "counter": { + "passed": 0, + "failed": 0, + "total": 1, + "unknown": 1 + }, + "type": "TestCaseReport", + "uid": "Starting", + "timer": {} + }, { "category": "synthesized", "counter": { @@ -416,16 +533,16 @@ "total": 1, "unknown": 1 }, - "definition_name": "accept_connection", + "definition_name": "After Start", "description": null, "entries": [], "hash": 0, "logs": [], - "name": "accept_connection", + "name": "After Start", "parent_uids": [ "InteractivePlan", "Test2", - "After Start" + "Environment Start" ], "runtime_status": "ready", "status": "unknown", @@ -434,7 +551,7 @@ "tags": {}, "timer": {}, "type": "TestCaseReport", - "uid": "accept_connection" + "uid": "After Start" } ], "env_status": null, @@ -442,7 +559,7 @@ "hash": 0, "host": null, "logs": [], - "name": "After Start", + "name": "Environment Start", "parent_uids": [ "InteractivePlan", "Test2" @@ -456,7 +573,7 @@ "tags": {}, "timer": {}, "type": "TestGroupReport", - "uid": "After Start" + "uid": "Environment Start" }, { "category": "testsuite", @@ -737,6 +854,67 @@ "timer": {}, "type": "TestGroupReport", "uid": "Custom_1" + }, + { + "definition_name": "Environment Stop", + "children": [], + "status": "unknown", + "description": null, + "status_reason": null, + "type": "TestGroupReport", + "parent_uids": [ + "InteractivePlan", + "Test2" + ], + "fix_spec_path": null, + "part": null, + "tags": {}, + "logs": [], + "uid": "Environment Stop", + "name": "Environment Stop", + "host": null, + "status_override": null, + "category": "synthesized", + "runtime_status": "ready", + "timer": {}, + "strict_order": false, + "counter": { + "passed": 0, + "failed": 0, + "total": 0 + }, + "entries": [ + { + "name": "Stopping", + "definition_name": "Stopping", + "counter": { + "passed": 0, + "failed": 0, + "total": 1, + "unknown": 1 + }, + "status_override": null, + "category": "synthesized", + "runtime_status": "ready", + "status": "unknown", + "logs": [], + "timer": {}, + "description": null, + "status_reason": null, + "type": "TestCaseReport", + "parent_uids": [ + "InteractivePlan", + "Test2", + "Environment Stop" + ], + "entries": [], + "hash": 0, + "uid": "Stopping", + "tags": {} + } + ], + "env_status": null, 
+ "hash": 0 } ], "env_status": "STOPPED", diff --git a/tests/functional/testplan/runnable/interactive/test_api.py b/tests/functional/testplan/runnable/interactive/test_api.py index e43f4c08d..7117c8df9 100644 --- a/tests/functional/testplan/runnable/interactive/test_api.py +++ b/tests/functional/testplan/runnable/interactive/test_api.py @@ -1051,11 +1051,18 @@ def test_cannot_start_environment(plan2): ) # Check the error message + mtest_url = ( + "http://localhost:{}/api/v1/interactive/report/tests/" + "BrokenMTest/suites/Environment%2520Start/testcases" + ).format(port) + rsp = requests.get(mtest_url) assert rsp.status_code == 200 mtest_json = rsp.json() - assert len(mtest_json["logs"]) == 1 - assert "Failed to start with no reason" in mtest_json["logs"][0]["message"] + assert len(mtest_json[0]["logs"]) == 1 + assert ( + "Failed to start with no reason" in mtest_json[0]["logs"][0]["message"] + ) def test_cannot_run_mtest(plan2): @@ -1091,7 +1098,7 @@ def test_cannot_run_mtest(plan2): _check_test_status, mtest_url, Status.ERROR.to_json_compatible(), - RuntimeStatus.NOT_RUN.to_json_compatible(), + RuntimeStatus.READY.to_json_compatible(), updated_json["hash"], ), interval=0.2, @@ -1100,11 +1107,15 @@ def test_cannot_run_mtest(plan2): ) # Check the error message - rsp = requests.get(mtest_url) + ts_url = ( + "http://localhost:{}/api/v1/interactive/report/tests/" + "BrokenMTest/suites/Environment%2520Start/testcases" + ).format(port) + rsp = requests.get(ts_url) assert rsp.status_code == 200 - mtest_json = rsp.json() - assert len(mtest_json["logs"]) == 1 - assert "Failed to start with no reason" in mtest_json["logs"][0]["message"] + ts_json = rsp.json() + assert len(ts_json[0]["logs"]) == 1 + assert "Failed to start with no reason" in ts_json[0]["logs"][0]["message"] def test_run_testcases_sequentially(plan3): diff --git a/tests/functional/testplan/runnable/interactive/test_interactive.py b/tests/functional/testplan/runnable/interactive/test_interactive.py index 70a6e1fbd..94ec90ce1 100644 --- a/tests/functional/testplan/runnable/interactive/test_interactive.py +++ b/tests/functional/testplan/runnable/interactive/test_interactive.py @@ -202,7 +202,15 @@ def test_top_level_tests(): compare( BRSTest2, plan.interactive.test_report("Test2"), - ignore=["hash", "information", "timer"], + ignore=[ + "hash", + "information", + "timer", + "machine_time", + "utc_time", + "file_path", + "line_no", + ], )[0] is True ) diff --git a/tests/functional/testplan/testing/fixtures/base/passing/report.py b/tests/functional/testplan/testing/fixtures/base/passing/report.py index 265ebc5a2..757203d4d 100644 --- a/tests/functional/testplan/testing/fixtures/base/passing/report.py +++ b/tests/functional/testplan/testing/fixtures/base/passing/report.py @@ -38,3 +38,52 @@ ), ], ) + + +expected_report_with_driver = TestReport( + name="plan", + entries=[ + TestGroupReport( + name="MyTest", + category="dummytest", + entries=[ + TestGroupReport( + name="Environment Start", + category="synthesized", + entries=[ + TestCaseReport( + name="Starting", + uid="Starting", + description="", + entries=[ + { + "type": "Log", + "description": "MyDriver[My executable] Status: STARTED", + }, + ], + ) + ], + tags=None, + ), + TestGroupReport( + name="ProcessChecks", + category="testsuite", + entries=[testcase_report], + ), + TestGroupReport( + name="Environment Stop", + category="synthesized", + entries=[ + TestCaseReport( + name="Stopping", + uid="Stopping", + description="", + entries=[], + ) + ], + tags=None, + ), + ], + ), + ], +) diff 
--git a/tests/functional/testplan/testing/multitest/test_error_handler_hook.py b/tests/functional/testplan/testing/multitest/test_error_handler_hook.py index b3d6fd458..717cd9929 100644 --- a/tests/functional/testplan/testing/multitest/test_error_handler_hook.py +++ b/tests/functional/testplan/testing/multitest/test_error_handler_hook.py @@ -66,35 +66,25 @@ def test_driver_failure(mockplan): mockplan.add(multitest) mockplan.run() - expected_report = TestReport( - name="plan", + expected_report = TestGroupReport( + name="Error Handler", + category=ReportCategories.SYNTHESIZED, entries=[ - TestGroupReport( - name="MyMultitest", - category=ReportCategories.MULTITEST, + TestCaseReport( + name="error_handler_fn", entries=[ - TestGroupReport( - name="Error handler", - category=ReportCategories.SYNTHESIZED, - entries=[ - TestCaseReport( - name="error_handler_fn", - entries=[ - { - "description": "Error handler ran!", - "type": "Log", - } - ], - ), - ], - ), + { + "description": "Error handler ran!", + "type": "Log", + } ], - status_override=Status.ERROR, - ) + ), ], ) - check_report(expected_report, mockplan.report) + assert mockplan.report.status == Status.ERROR + assert mockplan.report.entries[0].status == Status.ERROR + check_report(expected_report, mockplan.report.entries[0].entries[2]) def test_suite_hook_failure(mockplan): @@ -126,7 +116,7 @@ def test_suite_hook_failure(mockplan): tags=None, ), TestGroupReport( - name="Error handler", + name="Error Handler", category=ReportCategories.SYNTHESIZED, entries=[ TestCaseReport( @@ -145,6 +135,9 @@ def test_suite_hook_failure(mockplan): ], ) + assert mockplan.report.status == Status.ERROR + assert mockplan.report.entries[0].status == Status.ERROR + assert mockplan.report.entries[0].entries[1].status == Status.PASSED check_report(expected_report, mockplan.report) @@ -166,11 +159,11 @@ def test_multitest_hook_failure(mockplan): category=ReportCategories.MULTITEST, entries=[ TestGroupReport( - name="After Start", + name="Environment Start", category=ReportCategories.SYNTHESIZED, entries=[ TestCaseReport( - name="raising_hook", + name="After Start", entries=[], status_override=Status.ERROR, ) @@ -189,7 +182,7 @@ def test_multitest_hook_failure(mockplan): tags=None, ), TestGroupReport( - name="Error handler", + name="Error Handler", category=ReportCategories.SYNTHESIZED, entries=[ TestCaseReport( diff --git a/tests/functional/testplan/testing/multitest/test_multitest_drivers.py b/tests/functional/testplan/testing/multitest/test_multitest_drivers.py index 6f403920f..b4545ef8e 100644 --- a/tests/functional/testplan/testing/multitest/test_multitest_drivers.py +++ b/tests/functional/testplan/testing/multitest/test_multitest_drivers.py @@ -237,11 +237,13 @@ def test_multitest_driver_startup_failure(mockplan): res = mockplan.result assert res.run is True report = res.report - - assert "Exception: Startup error" in report.entries[0].logs[0]["message"] - text = report.entries[0].logs[1]["message"].split(os.linesep) - assert re.match(r".*Information from log file:.+stderr.*", text[0]) - assert re.match(r".*Error found.*", text[1]) + assert ( + "Exception: Startup error" + in report.entries[0].entries[0].entries[0].logs[0]["message"] + ) + text = report.entries[0].entries[0].entries[0].logs[0]["message"] + assert re.search(r".*Information from log file:.+stderr.*", text) + assert re.search(r".*Error found.*", text) def test_multitest_driver_fetch_error_log(mockplan): @@ -269,12 +271,15 @@ def test_multitest_driver_fetch_error_log(mockplan): assert res.run is 
True report = res.report - assert "Exception: Shutdown error" in report.entries[0].logs[0]["message"] + assert ( + "Exception: Shutdown error" + in report.entries[0].entries[-1].entries[0].logs[0]["message"] + ) - text = report.entries[0].logs[1]["message"].split(os.linesep) - assert re.match(r".*Information from log file:.+stdout.*", text[0]) - for idx, line in enumerate(text[1:]): - assert re.match(r".*This is line 99{}.*".format(idx), line) + text = report.entries[0].entries[-1].entries[0].logs[0]["message"] + assert re.search(r".*Information from log file:.+stdout.*", text) + for idx in range(10): + assert re.search(r".*This is line 99{}.*".format(idx), text) def test_multitest_driver_start_timeout(): diff --git a/tests/functional/testplan/testing/multitest/test_pre_post_steps.py b/tests/functional/testplan/testing/multitest/test_pre_post_steps.py index e730074ed..3d8a98825 100644 --- a/tests/functional/testplan/testing/multitest/test_pre_post_steps.py +++ b/tests/functional/testplan/testing/multitest/test_pre_post_steps.py @@ -61,22 +61,16 @@ def test_pre_post_steps(mockplan): category=ReportCategories.MULTITEST, entries=[ TestGroupReport( - name="Before Start", + name="Environment Start", category=ReportCategories.SYNTHESIZED, entries=[ TestCaseReport( - name="check_func_1", + name="Before Start", category=ReportCategories.SYNTHESIZED, entries=[{"type": "Equal", "passed": True}], ), - ], - ), - TestGroupReport( - name="After Start", - category=ReportCategories.SYNTHESIZED, - entries=[ TestCaseReport( - name="check_func_2", + name="After Start", category=ReportCategories.SYNTHESIZED, ), ], @@ -93,21 +87,15 @@ def test_pre_post_steps(mockplan): ], ), TestGroupReport( - name="Before Stop", + name="Environment Stop", category=ReportCategories.SYNTHESIZED, entries=[ TestCaseReport( - name="check_func_3", + name="Before Stop", category=ReportCategories.SYNTHESIZED, ), - ], - ), - TestGroupReport( - name="After Stop", - category=ReportCategories.SYNTHESIZED, - entries=[ TestCaseReport( - name="check_func_4", + name="After Stop", category=ReportCategories.SYNTHESIZED, entries=[ {"type": "Equal", "passed": False}, @@ -145,11 +133,11 @@ def test_empty_pre_post_steps(mockplan): category=ReportCategories.MULTITEST, entries=[ TestGroupReport( - name="After Start", + name="Environment Start", category=ReportCategories.SYNTHESIZED, entries=[ TestCaseReport( - name="check_func_2", + name="After Start", category=ReportCategories.SYNTHESIZED, ), ], diff --git a/tests/functional/testplan/testing/multitest/test_xfail.py b/tests/functional/testplan/testing/multitest/test_xfail.py index cde9c94f2..0ebbb3b36 100644 --- a/tests/functional/testplan/testing/multitest/test_xfail.py +++ b/tests/functional/testplan/testing/multitest/test_xfail.py @@ -81,7 +81,7 @@ def test_dynamic_xfail(): "reason": "known flaky", "strict": False, }, - "Testsuite Setup Error:After Start:error_hook": { + "Testsuite Setup Error:Environment Start:After Start": { "reason": "known flaky", "strict": False, }, diff --git a/tests/functional/testplan/testing/test_base.py b/tests/functional/testplan/testing/test_base.py index a41d3e29f..916091d7d 100644 --- a/tests/functional/testplan/testing/test_base.py +++ b/tests/functional/testplan/testing/test_base.py @@ -56,7 +56,7 @@ def read_test_data(self): ), ( os.path.join(fixture_root, "passing", "test_env.sh"), - base.passing.report.expected_report, + base.passing.report.expected_report_with_driver, dict( proc_env={ "proc_env1": "abc", diff --git 
a/tests/unit/testplan/runnable/interactive/test_irunner.py b/tests/unit/testplan/runnable/interactive/test_irunner.py index 6946fdf62..581b5fab1 100644 --- a/tests/unit/testplan/runnable/interactive/test_irunner.py +++ b/tests/unit/testplan/runnable/interactive/test_irunner.py @@ -14,6 +14,7 @@ from testplan.testing import ordering from testplan.testing.multitest import driver from testplan.runnable.interactive import base +from testplan.common.report.base import Status as ReportStatus from testplan.common.utils.path import default_runpath from testplan.common.utils.testing import check_report from testplan.runners.local import LocalRunner @@ -151,10 +152,13 @@ def test_run_all_tests(irunner, sync): assert ret.result() is None # The report tree should have been updated as a side-effect. - assert irunner.report.passed + # Env stop should not be triggered + assert irunner.report.status == ReportStatus.UNKNOWN assert len(irunner.report.entries) == 3 for test_report in irunner.report: - assert test_report.passed + assert test_report.entries[0].passed + assert test_report.entries[1].passed + assert test_report.entries[2].status == ReportStatus.UNKNOWN @pytest.mark.parametrize("sync", [True, False]) @@ -166,7 +170,10 @@ def test_run_test(irunner, sync): assert ret.result() is None # The test report should have been updated as a side effect. - assert irunner.report["test_1"].passed + assert irunner.report["test_1"].entries[0].passed + assert irunner.report["test_1"].entries[1].passed + # Env stop + assert irunner.report["test_1"].entries[2].status == ReportStatus.UNKNOWN @pytest.mark.parametrize("sync", [True, False]) @@ -326,13 +333,15 @@ def test_run_all_tagged_tests(tags, num_of_suite_entries): irunner.setup() irunner.run_all_tests(await_results=True) - assert irunner.report.passed + assert irunner.report.status == ReportStatus.UNKNOWN + assert irunner.report.entries[0].entries[1].passed assert len(irunner.report.entries) == 3 for test_report in irunner.report: - assert test_report.passed - assert len(test_report.entries) == 1 - assert len(test_report.entries[0].entries) == num_of_suite_entries - assert len(test_report.entries[0].entries[-1].entries) == 3 + assert test_report.status == ReportStatus.UNKNOWN + assert test_report.entries[1].passed + assert len(test_report.entries) == 3 + assert len(test_report.entries[1].entries) == num_of_suite_entries + assert len(test_report.entries[1].entries[-1].entries) == 3 irunner.teardown() @@ -346,25 +355,26 @@ def test_initial_report(irunner): assert initial_report.runtime_status == report.RuntimeStatus.READY assert len(initial_report.entries) == 3 for test_report in initial_report: - # Each Test contains one suite. + # Each Test contains three suite. assert test_report.status == report.Status.UNKNOWN assert test_report.runtime_status == report.RuntimeStatus.READY - assert len(test_report.entries) == 1 - for suite_report in test_report: - # Each suite contains two testcase. - assert suite_report.status == report.Status.UNKNOWN - assert suite_report.runtime_status == report.RuntimeStatus.READY - assert len(suite_report.entries) == 2 + assert len(test_report.entries) == 3 + suite_report = test_report.entries[1] - # The first entry in the suite report is a regular testcase. - testcase_report = suite_report.entries[0] - assert isinstance(testcase_report, report.TestCaseReport) - assert len(testcase_report.entries) == 0 + # Each suite contains two testcase. 
+ assert suite_report.status == report.Status.UNKNOWN + assert suite_report.runtime_status == report.RuntimeStatus.READY + assert len(suite_report.entries) == 2 - # The second entry in the suite report is a parametrized testcase. - param_report = suite_report.entries[1] - assert isinstance(param_report, report.TestGroupReport) - assert len(param_report.entries) == 3 + # The first entry in the suite report is a regular testcase. + testcase_report = suite_report.entries[0] + assert isinstance(testcase_report, report.TestCaseReport) + assert len(testcase_report.entries) == 0 + + # The second entry in the suite report is a parametrized testcase. + param_report = suite_report.entries[1] + assert isinstance(param_report, report.TestGroupReport) + assert len(param_report.entries) == 3 def test_reload_report(irunner): @@ -409,14 +419,17 @@ def test_reload_report(irunner): # We reload and assert irunner.reload_report() + assert len(irunner.report) == 3 + for test in irunner.report: # A MultiTest should reset to ready upon changes underneath - assert test.runtime_status == ( - RuntimeStatus.FINISHED - if test.uid == "test_1" - else RuntimeStatus.READY - ) + assert test.runtime_status == RuntimeStatus.READY + if test.uid == "test_1": + assert test.entries[1].runtime_status == RuntimeStatus.FINISHED for suite in irunner.report[test.uid]: + if suite.uid in ("Environment Start", "Environment Stop"): + continue + # A testsuite should reset to ready upon changes underneath assert suite.runtime_status == ( RuntimeStatus.FINISHED
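
For context, a minimal sketch of how the synthesized entries introduced by this
patch can be marked as expected failures. It assumes the `xfail_tests` plan
option in the shape exercised by
tests/functional/testplan/testing/multitest/test_xfail.py above (pattern keys of
the form "<multitest>:<synthesized suite>:<synthesized case>"); the plan name
"XfailEnvDemo", the MultiTest name "MyMultitest", the suite "Basic", and the
reasons are illustrative only:

    import sys

    from testplan import test_plan
    from testplan.testing.multitest import MultiTest, testcase, testsuite


    @testsuite
    class Basic:
        @testcase
        def check(self, env, result):
            result.true(True, description="placeholder assertion")


    # Pattern shape mirrors the keys asserted in test_xfail.py in this patch;
    # names and reasons here are hypothetical.
    @test_plan(
        name="XfailEnvDemo",
        xfail_tests={
            # Driver startup failures now surface in the synthesized
            # "Environment Start" suite under the "Starting" testcase:
            "MyMultitest:Environment Start:Starting": {
                "reason": "known flaky driver startup",
                "strict": False,
            },
            # Driver shutdown failures surface under "Environment Stop" / "Stopping":
            "MyMultitest:Environment Stop:Stopping": {
                "reason": "known flaky driver shutdown",
                "strict": False,
            },
        },
    )
    def main(plan):
        plan.add(MultiTest(name="MyMultitest", suites=[Basic()]))


    if __name__ == "__main__":
        sys.exit(not main())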
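
Design note, as reflected in the diff itself: driver start/stop exceptions are
no longer appended to the top-level MultiTest report via `post_step_call`
(removed from testplan/testing/multitest/base.py); instead `_start_resource`
and `_stop_resource` record them on the synthesized "Starting"/"Stopping"
testcase reports, including `report_errors_from_logs` output, and set
`status_override` to ERROR there. This is why the interactive fixtures and
test_irunner assertions now show a MultiTest as "unknown"/"ready" until its
"Environment Stop" entry has actually run.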