diff --git a/doc/newsfragments/1642_new.json_lister.rst b/doc/newsfragments/1642_new.json_lister.rst
new file mode 100644
index 000000000..324d2f710
--- /dev/null
+++ b/doc/newsfragments/1642_new.json_lister.rst
@@ -0,0 +1 @@
+Added a new lister that dumps testplan metadata to JSON. Try `--info json` to dump to stdout or `--info json:/var/tmp/tests.json` to dump to a file.
\ No newline at end of file
diff --git a/testplan/base.py b/testplan/base.py
index b637f3637..3bebb77a3 100644
--- a/testplan/base.py
+++ b/testplan/base.py
@@ -1,5 +1,6 @@
 """Testplan base module."""
 import argparse
+import json
 import os
 import random
 import signal
@@ -15,6 +16,7 @@
 from testplan.common.config import ConfigOption
 from testplan.common.utils import logger, path
 from testplan.common.utils.callable import arity
+from testplan.common.utils.logger import TESTPLAN_LOGGER
 from testplan.common.utils.validation import has_method, is_subclass
 from testplan.environment import Environments
 from testplan.parser import TestplanParser
@@ -23,6 +25,8 @@
 from testplan.runners.local import LocalRunner
 from testplan.runners.base import Executor
 from testplan.testing import filtering, ordering
+from testplan.testing.listing import Lister, MetadataBasedLister
+from testplan.testing.multitest.test_metadata import TestPlanMetadata


 def pdb_drop_handler(sig, frame):
@@ -160,6 +164,8 @@ class Testplan(entity.RunnableManager):
     :param test_lister: Tests listing class.
     :type test_lister: Subclass of
         :py:class:`BaseLister `
+    :param test_lister_output: Listing results are written to this file; if None, they go to stdout.
+    :type test_lister_output: ``os.PathLike``
     :param verbose: Enable or disable verbose mode.
     :type verbose: ``bool``
     :param debug: Enable or disable debug mode.
@@ -215,6 +221,7 @@ def __init__(
         test_filter=filtering.Filter(),
         test_sorter=ordering.NoopSorter(),
         test_lister=None,
+        test_lister_output=None,
         verbose=False,
         debug=False,
         timeout=defaults.TESTPLAN_TIMEOUT,
@@ -274,6 +281,7 @@ def __init__(
             test_filter=test_filter,
             test_sorter=test_sorter,
             test_lister=test_lister,
+            test_lister_output=test_lister_output,
             verbose=verbose,
             debug=debug,
             timeout=timeout,
@@ -416,6 +424,7 @@ def main_wrapper(
         test_filter=filtering.Filter(),
         test_sorter=ordering.NoopSorter(),
         test_lister=None,
+        test_lister_output=None,
         verbose=False,
         debug=False,
         timeout=defaults.TESTPLAN_TIMEOUT,
@@ -474,6 +483,7 @@ def test_plan_inner_inner():
                 test_filter=test_filter,
                 test_sorter=test_sorter,
                 test_lister=test_lister,
+                test_lister_output=test_lister_output,
                 verbose=verbose,
                 debug=debug,
                 timeout=timeout,
@@ -494,6 +504,21 @@ def test_plan_inner_inner():
                 plan.abort()
                 raise

+            lister: MetadataBasedLister = plan.cfg.test_lister
+            if lister is not None and lister.metadata_based:
+                output = lister.get_output(
+                    TestPlanMetadata(
+                        plan.cfg.name,
+                        plan.cfg.description,
+                        plan.get_test_metadata(),
+                    )
+                )
+                if plan.cfg.test_lister_output:
+                    with open(plan.cfg.test_lister_output, "wt") as file:
+                        file.write(output)
+                else:
+                    TESTPLAN_LOGGER.user_info(output)
+
             plan_result = plan.run()
             plan_result.decorated_value = returned
             return plan_result
diff --git a/testplan/runnable/base.py b/testplan/runnable/base.py
index 6f3cc6a67..5b4abec2b 100644
--- a/testplan/runnable/base.py
+++ b/testplan/runnable/base.py
@@ -9,6 +9,7 @@
 import uuid
 import webbrowser
 from collections import OrderedDict
+from dataclasses import dataclass
 from typing import (
     Any,
     Callable,
@@ -57,13 +58,28 @@
 from testplan.runnable.interactive import TestRunnerIHandler
 from testplan.runners.base import
Executor from testplan.runners.pools.tasks import Task, TaskResult -from testplan.runners.pools.tasks.base import is_task_target +from testplan.runners.pools.tasks.base import ( + is_task_target, + TaskTargetInformation, + get_task_target_information, +) from testplan.testing import filtering, listing, ordering, tagging from testplan.testing.base import Test, TestResult +from testplan.testing.listing import Lister from testplan.testing.multitest import MultiTest from testplan.runners.pools.base import Pool +TestTask = Union[Test, Task, Callable] + + +@dataclass +class TaskInformation: + target: TestTask + materialized_test: Test + uid: str + + def get_exporters(values): """ Validation function for exporter declarations. @@ -180,8 +196,9 @@ def get_options(cls): # Test lister is None by default, otherwise Testplan would # list tests, not run them ConfigOption("test_lister", default=None): Or( - None, listing.BaseLister + None, listing.BaseLister, listing.MetadataBasedLister ), + ConfigOption("test_lister_output", default=None): Or(str, None), ConfigOption("verbose", default=False): bool, ConfigOption("debug", default=False): bool, ConfigOption("timeout", default=defaults.TESTPLAN_TIMEOUT): Or( @@ -251,6 +268,15 @@ def success(self): ) +CACHED_TASK_INFO_ATTRIBUTE = "_cached_task_info" + + +def _cache_task_info(task_info: TaskInformation): + task = task_info.target + setattr(task, CACHED_TASK_INFO_ATTRIBUTE, task_info) + return task + + class TestRunner(Runnable): r""" Adds tests to test @@ -352,6 +378,7 @@ class TestRunner(Runnable): def __init__(self, **options): super(TestRunner, self).__init__(**options) # uid to resource, in definition order + self._test_metadata = [] self._tests: MutableMapping[str, str] = OrderedDict() self._result.test_report = TestReport( name=self.cfg.name, @@ -399,6 +426,9 @@ def exporters(self): exporter.parent = self return self._exporters + def get_test_metadata(self): + return self._test_metadata + def disable_reset_report_uid(self): """Do not generate unique strings in uuid4 format as report uid""" self._reset_report_uid = False @@ -513,6 +543,105 @@ def _stop_remote_services(self): self.logger.info("Stopping Remote Server %s", name) rmt_svc.stop() + def _clone_task_for_part(self, task_info, _task_arguments, part): + _task_arguments["part"] = part + self.logger.debug( + "Task re-created with arguments: %s", + _task_arguments, + ) + _multitest_params = task_info.materialized_test.cfg._cfg_input + _multitest_params["part"] = part + target = Task(**_task_arguments) + materialized_test = task_info.materialized_test.__class__( + **_multitest_params + ) + target._uid = materialized_test.uid() + new_task = TaskInformation( + target, + materialized_test, + materialized_test.uid(), + ) + return new_task + + def _get_tasks( + self, _task_arguments, num_of_parts, runtime_data + ) -> List[TaskInformation]: + self.logger.debug( + "Task created with arguments: %s", + _task_arguments, + ) + task = Task(**_task_arguments) + task_info = self._collect_task_info(task) + + uid = task_info.uid + + tasks: List[TaskInformation] = [] + time_info = runtime_data.get(uid, None) + if num_of_parts: + + if not isinstance(task_info.materialized_test, MultiTest): + raise TypeError( + "multitest_parts specified in @task_target," + " but the Runnable is not a MultiTest" + ) + + if num_of_parts == "auto": + if not time_info: + self.logger.warning( + "%s parts is auto but cannot find it in runtime-data", + uid, + ) + num_of_parts = 1 + else: + num_of_parts = math.ceil( + 
time_info["execution_time"] + / ( + self.cfg.auto_part_runtime_limit + - time_info["setup_time"] + ) + ) + if "weight" not in _task_arguments: + _task_arguments["weight"] = _task_arguments.get( + "weight", None + ) or ( + math.ceil( + (time_info["execution_time"] / num_of_parts) + + time_info["setup_time"] + ) + if time_info + else self.cfg.auto_part_runtime_limit + ) + self.logger.user_info( + "%s: parts=%d, weight=%d", + uid, + num_of_parts, + _task_arguments["weight"], + ) + if num_of_parts == 1: + task_info.target.weight = _task_arguments["weight"] + tasks.append(task_info) + else: + for i in range(num_of_parts): + + part = (i, num_of_parts) + new_task = self._clone_task_for_part( + task_info, _task_arguments, part + ) + + tasks.append(new_task) + + else: + if time_info and not task.weight: + task_info.target.weight = math.ceil( + time_info["execution_time"] + time_info["setup_time"] + ) + self.logger.user_info( + "%s: weight=%d", uid, task_info.target.weight + ) + tasks.append(task_info) + + return tasks + def discover( self, path: str = ".", @@ -534,76 +663,10 @@ def discover( path, ) regex = re.compile(name_pattern) - tasks = [] + tasks: List[TaskInformation] = [] runtime_data: dict = self.cfg.runtime_data or {} - def _add_task(_target, _task_arguments): - self.logger.debug( - "Task created with arguments: %s", - _task_arguments, - ) - task = Task(**_task_arguments) - uid = self._verify_test_target(task) - - # nothing to run - if not uid: - return - time_info = runtime_data.get(uid, None) - if getattr(_target, "__multitest_parts__", None): - num_of_parts = _target.__multitest_parts__ - if num_of_parts == "auto": - if not time_info: - self.logger.warning( - "%s parts is auto but cannot find it in runtime-data", - uid, - ) - num_of_parts = 1 - else: - num_of_parts = math.ceil( - time_info["execution_time"] - / ( - self.cfg.auto_part_runtime_limit - - time_info["setup_time"] - ) - ) - if "weight" not in _task_arguments: - _task_arguments["weight"] = _task_arguments.get( - "weight", None - ) or ( - math.ceil( - (time_info["execution_time"] / num_of_parts) - + time_info["setup_time"] - ) - if time_info - else self.cfg.auto_part_runtime_limit - ) - self.logger.user_info( - "%s: parts=%d, weight=%d", - uid, - num_of_parts, - _task_arguments["weight"], - ) - if num_of_parts == 1: - task.weight = _task_arguments["weight"] - tasks.append(task) - else: - for i in range(num_of_parts): - _task_arguments["part"] = (i, num_of_parts) - self.logger.debug( - "Task re-created with arguments: %s", - _task_arguments, - ) - tasks.append(Task(**_task_arguments)) - - else: - if time_info and not task.weight: - task.weight = math.ceil( - time_info["execution_time"] + time_info["setup_time"] - ) - self.logger.user_info("%s: weight=%d", uid, task.weight) - tasks.append(task) - for root, dirs, files in os.walk(path or "."): for filename in files: if not regex.match(filename): @@ -621,15 +684,17 @@ def _add_task(_target, _task_arguments): self.logger.debug( "Discovered task target %s::%s", filepath, attr ) + + task_target_info = get_task_target_information(target) task_arguments = dict( target=attr, module=module, path=root, - **target.__task_kwargs__, + **task_target_info.task_kwargs, ) - if target.__target_params__: - for param in target.__target_params__: + if task_target_info.target_params: + for param in task_target_info.target_params: if isinstance(param, dict): task_arguments["args"] = None task_arguments["kwargs"] = param @@ -643,11 +708,23 @@ def _add_task(_target, _task_arguments): " received: 
{param}" ) task_arguments["part"] = None - _add_task(target, task_arguments) + tasks.extend( + self._get_tasks( + task_arguments, + task_target_info.multitest_parts, + runtime_data, + ) + ) else: - _add_task(target, task_arguments) + tasks.extend( + self._get_tasks( + task_arguments, + task_target_info.multitest_parts, + runtime_data, + ) + ) - return tasks + return [_cache_task_info(task_info) for task_info in tasks] def calculate_pool_size_by_tasks(self, tasks: Collection[Task]) -> int: """ @@ -761,75 +838,68 @@ def add( :return: Assigned uid for test. :rtype: ``str`` or ```NoneType`` """ + + # Get the real test entity and verify if it should be added + task_info = self._collect_task_info(target) + local_runner = self.resources.first() - resource: Union[Executor, str, None] = resource or local_runner + resource: Union[str, None] = resource or local_runner if resource not in self.resources: raise RuntimeError( 'Resource "{}" does not exist.'.format(resource) ) - # Get the real test entity and verify if it should be added - uid = self._verify_test_target(target) - if not uid: - return None + self._verify_task_info(task_info) - # Reset the task uid which will be used for test result transport in - # a pool executor, it makes logging or debugging easier. - if isinstance(target, Task): - target._uid = uid + uid = task_info.uid - # In batch mode the original target is added into executors, it can be: - # 1> A runnable object (generally a test entity or customized by user) - # 2> A callable that returns a runnable object - # 3> A task that wrapped a runnable object - if uid in self._tests: - raise ValueError( - '{} with uid "{}" already added.'.format(self._tests[uid], uid) - ) + # let see if it is filtered + if not self._should_task_running(task_info): + return None - self._tests[uid] = resource - self.resources[resource].add(target, uid) - return uid + # "--list" option always means not executing tests + lister: Lister = self.cfg.test_lister + if lister is not None and not lister.metadata_based: + self.cfg.test_lister.log_test_info(task_info.materialized_test) + return None - def _verify_test_target( - self, target: Union[Test, Task, Callable] - ) -> Optional[str]: - """ - Materialize the test target, and: - - check uniqueness - - check against filter - - check against lister - - check runnable type - - cut corner for interactive mode - Return the runnable uid if it should run, otherwise None. 
-        """
-        # The target added into TestRunner can be: 1> a real test entity
-        # 2> a task wraps a test entity 3> a callable returns a test entity
+        if self.cfg.interactive_port is not None:
+            self._register_task_for_interactive(task_info)
+            # for interactive mode always use the local runner
+            resource = local_runner
-        if hasattr(target, "uid"):
-            key = target.uid()
-        else:
-            key = id(target)
+        target = task_info.target
+        # if running in the local runner we can just enqueue the materialized test
+        if resource == local_runner:
+            target = task_info.materialized_test
-        if key in self._verified_targets:
-            return self._verified_targets[key]
-        else:
-            self._verified_targets[key] = None
+        self._register_task(
+            resource, target, uid, task_info.materialized_test.get_metadata()
+        )
+        return uid
+
+    def _register_task(self, resource, target, uid, metadata):
+        self._tests[uid] = resource
+        self._test_metadata.append(metadata)
+        self.resources[resource].add(target, uid)

+    def _collect_task_info(self, target: TestTask) -> TaskInformation:
         if isinstance(target, Test):
             target_test = target
         elif isinstance(target, Task):
-            target_test = target.materialize()
-            if self.cfg.interactive_port is not None and isinstance(
-                target._target, str
-            ):
-                self.scheduled_modules.append(
-                    (
-                        target._module or target._target.rsplit(".", 1)[0],
-                        os.path.abspath(target._path),
-                    )
-                )
+
+            # First check whether there is a cached task info.
+            # This is an optimization flow where the task info
+            # needs to be created at discover time, but the already defined API
+            # needs to pass a Task, so we attach the task info to the task itself
+            # and remove it here.
+            if hasattr(target, CACHED_TASK_INFO_ATTRIBUTE):
+                task_info = getattr(target, CACHED_TASK_INFO_ATTRIBUTE)
+                setattr(target, CACHED_TASK_INFO_ATTRIBUTE, None)
+                return task_info
+            else:
+                target_test = target.materialize()
         elif callable(target):
             target_test = target()
         else:
@@ -841,49 +911,54 @@ def _verify_test_target(
         target_test.parent = self
         target_test.cfg.parent = self.cfg

-        # verify runnable is unique
         uid = target_test.uid()
+
+        # Reset the task uid which will be used for test result transport in
+        # a pool executor, it makes logging or debugging easier.
+
+        # TODO: this mutates the target; should we make a copy instead?
+        if isinstance(target, Task):
+            target._uid = uid
+
+        return TaskInformation(target, target_test, uid)
+
+    def _register_task_for_interactive(self, task_info: TaskInformation):
+        target = task_info.target
+        if isinstance(target, Task) and isinstance(target._target, str):
+            self.scheduled_modules.append(
+                (
+                    target._module or target._target.rsplit(".", 1)[0],
+                    os.path.abspath(target._path),
+                )
+            )
+
+    def _verify_task_info(self, task_info: TaskInformation) -> None:
+        uid = task_info.uid
+        if uid in self._tests:
+            raise ValueError(
+                '{} with uid "{}" already added.'.format(self._tests[uid], uid)
+            )
+
         if uid in self._runnable_uids:
             raise RuntimeError(
                 f"Runnable with uid {uid} has already been verified"
             )
         else:
+            # TODO: this should be part of the add step
            self._runnable_uids.add(uid)
-            self._verified_targets[key] = uid
-
-        # if filter is defined

+    def _should_task_running(self, task_info: TaskInformation) -> bool:
+        should_run = True
         if type(self.cfg.test_filter) is not filtering.Filter:
-            should_run = target_test.should_run()
+            test = task_info.materialized_test
+            should_run = test.should_run()
             self.logger.debug(
                 "Should run %s? 
%s", - target_test.name, + test.name, "Yes" if should_run else "No", ) - if not should_run: - return None - - # "--list" option always means not executing tests - if self.cfg.test_lister is not None: - self.cfg.test_lister.log_test_info(target_test) - return None - if getattr(target, "__multitest_parts__", None): - if not isinstance(target_test, MultiTest): - raise TypeError( - "multitest_parts specified in @task_target," - " but the Runnable is not a MultiTest" - ) - - # cut corner for interactive mode as we already have the runnable - # so add it to local runner and continue - if self.cfg.interactive_port is not None: - local_runner = self.resources.first() - self._tests[uid] = local_runner - self.resources[local_runner].add(target_test, uid) - return None - - return uid + return should_run def _record_start(self): self.report.timer.start("run") diff --git a/testplan/runners/pools/tasks/base.py b/testplan/runners/pools/tasks/base.py index 66c04150c..8b5a03baf 100644 --- a/testplan/runners/pools/tasks/base.py +++ b/testplan/runners/pools/tasks/base.py @@ -5,7 +5,22 @@ import os import warnings from collections import OrderedDict -from typing import Optional, Tuple, Union, Dict, Sequence, Callable +from dataclasses import dataclass +from typing import ( + Optional, + Tuple, + Union, + Dict, + Sequence, + Callable, + Any, +) + +try: + from typing import Literal +except ImportError: + from typing_extensions import Literal + from testplan.common.entity import Runnable from testplan.testing.base import Test, TestResult @@ -342,9 +357,16 @@ def uid(self): return strings.uuid4() +@dataclass +class TaskTargetInformation: + target_params: Sequence[Union[Sequence, dict]] + task_kwargs: Dict[str, Any] + multitest_parts: Union[int, str, None] + + def task_target( parameters: Union[Callable, Sequence[Union[Sequence, dict]]] = None, - multitest_parts: Union[int, str, None] = None, + multitest_parts: Union[int, Literal["auto"], None] = None, **kwargs, ): """ @@ -366,30 +388,32 @@ def task_target( # `task_target` is used without parentheses, then `parameters` is the # real callable object (task target) to be decorated. if callable(parameters) and len(kwargs) == 0: - set_task_target(parameters) - parameters.__target_params__ = None - parameters.__task_kwargs__ = {} - return parameters + func = parameters + set_task_target(func, TaskTargetInformation(None, {}, None)) + return func def inner(func): - set_task_target(func) - func.__target_params__ = parameters - func.__multitest_parts__ = multitest_parts - func.__task_kwargs__ = kwargs + set_task_target( + func, TaskTargetInformation(parameters, kwargs, multitest_parts) + ) return func return inner -def set_task_target(func): +def set_task_target(func: Callable, info: TaskTargetInformation): """ Mark a callable object as a task target which can be packaged in a :py:class:`~testplan.runners.pools.tasks.base.Task` object. 
""" - func.__is_task_target__ = True + func.__task_target_info__ = info def is_task_target(func): """Check if a callable object is a task target.""" - return getattr(func, "__is_task_target__", False) + return getattr(func, "__task_target_info__", False) + + +def get_task_target_information(func) -> TaskTargetInformation: + return getattr(func, "__task_target_info__") diff --git a/testplan/testing/base.py b/testplan/testing/base.py index 27930ece3..ccd4952f3 100644 --- a/testplan/testing/base.py +++ b/testplan/testing/base.py @@ -35,6 +35,7 @@ from testplan.testing.environment import TestEnvironment, parse_dependency from testplan.testing.multitest.entries.assertions import RawAssertion from testplan.testing.multitest.entries.base import Attachment +from testplan.testing.multitest.test_metadata import TestMetadata TEST_INST_INDENT = 2 SUITE_INDENT = 4 @@ -226,6 +227,9 @@ def get_stdout_style(self, passed): """Stdout style for status.""" return self.stdout_style.get_style(passing=passed) + def get_metadata(self) -> TestMetadata: + return TestMetadata(self.name, self.description, []) + def uid(self): """Instance name uid.""" return self.cfg.name diff --git a/testplan/testing/listing.py b/testplan/testing/listing.py index e15a2b39d..0208c5617 100644 --- a/testplan/testing/listing.py +++ b/testplan/testing/listing.py @@ -1,20 +1,40 @@ """ This module contains logic for listing representing test context of a plan. """ +import dataclasses +import json import os +from argparse import Action, ArgumentParser, Namespace from enum import Enum +from os import PathLike +from typing import List, Union, Sequence, Any, Tuple +from urllib.parse import urlparse from testplan.common.utils.parser import ArgMixin from testplan.common.utils.logger import TESTPLAN_LOGGER from testplan.testing import tagging from testplan.testing.multitest import MultiTest +from testplan.testing.multitest.test_metadata import TestPlanMetadata INDENT = " " MAX_TESTCASES = 25 -class BaseLister: +class Listertype: + NAME = None + DESCRIPTION = None + + metadata_based = False + + def name(self): + return self.NAME + + def description(self): + return self.DESCRIPTION + + +class BaseLister(Listertype): """ Base of all listers, implement the :py:meth:`get_output` give it a name in :py:attr:`NAME` and a description in :py:attr:`DESCRIPTION` or alternatively @@ -22,9 +42,6 @@ class BaseLister: added to :py:data:`listing_registry`. 
""" - NAME = None - DESCRIPTION = None - def log_test_info(self, instance): output = self.get_output(instance) if output: @@ -33,12 +50,6 @@ def log_test_info(self, instance): def get_output(self, instance): raise NotImplementedError - def name(self): - return self.NAME - - def description(self): - return self.DESCRIPTION - class ExpandedNameLister(BaseLister): """ @@ -222,11 +233,66 @@ def get_output(self, instance): return "" +class store_lister_and_path(Action): + def __call__( + self, + parser: ArgumentParser, + namespace: Namespace, + values: Tuple[Listertype, PathLike], + option_string: Union[str, None] = None, + ) -> None: + setattr(namespace, self.dest, values[0]) + setattr(namespace, f"{self.dest}_output", values[1]) + + class ListingArgMixin(ArgMixin): + @classmethod + def parse(cls, arg): + uri = urlparse(arg) + lister, path = (uri.scheme, uri.path) if uri.scheme else (arg, None) + return super().parse(lister), path + @classmethod def get_descriptions(cls): return dict([(lister, lister.value.description()) for lister in cls]) + @classmethod + def get_parser_context(cls, default=None, **kwargs): + return dict( + **super().get_parser_context(default, **kwargs), + action=store_lister_and_path, + ) + + +class MetadataBasedLister(Listertype): + """ + Base of all metadata based listers, implement the :py:meth:`get_output` give it a name in + :py:attr:`NAME` and a description in :py:attr:`DESCRIPTION` or alternatively + override :py:meth:`name` and/or :py:meth:`description` and it is good to be + added to :py:data:`listing_registry`. + """ + + metadata_based = True + + def log_test_info(self, metadata: TestPlanMetadata): + output = self.get_output(metadata) + if output: + TESTPLAN_LOGGER.user_info(output) + + def get_output(self, metadata: TestPlanMetadata): + raise NotImplementedError + + +class SimpleJsonLister(MetadataBasedLister): + NAME = "JSON" + DESCRIPTION = ( + "Dump test information in json. 
" + "Can take json:/path/to/output.json as well, then the result is dumped to the file" + ) + + def get_output(self, metadata: TestPlanMetadata): + return json.dumps(dataclasses.asdict(metadata), indent=2) + class ListingRegistry: """ @@ -235,7 +301,7 @@ class ListingRegistry: """ def __init__(self): - self.listers = [] + self.listers: List[Listertype] = [] def add_lister(self, lister): self.listers.append(lister) @@ -261,3 +327,7 @@ def to_arg(self): listing_registry.add_lister(ExpandedPatternLister()) listing_registry.add_lister(ExpandedNameLister()) listing_registry.add_lister(CountLister()) +listing_registry.add_lister(SimpleJsonLister()) + + +Lister = Union[BaseLister, MetadataBasedLister] diff --git a/testplan/testing/multitest/base.py b/testplan/testing/multitest/base.py index 935c68bbb..2fad76a42 100644 --- a/testplan/testing/multitest/base.py +++ b/testplan/testing/multitest/base.py @@ -33,6 +33,8 @@ from testplan.testing.multitest import suite as mtest_suite from testplan.testing.multitest.entries import base as entries_base from testplan.testing.multitest.result import report_target +from testplan.testing.multitest.suite import get_suite_metadata +from testplan.testing.multitest.test_metadata import TestMetadata def iterable_suites(obj): @@ -714,6 +716,13 @@ def stop_test_resources(self): if self.cfg.after_stop: self._wrap_run_step(label="after_stop", func=self.cfg.after_stop)() + def get_metadata(self) -> TestMetadata: + return TestMetadata( + name=self.name, + description=self.cfg.description, + test_suites=[get_suite_metadata(suite) for suite in self.suites], + ) + @property def _thread_pool_size(self): """ diff --git a/testplan/testing/multitest/suite.py b/testplan/testing/multitest/suite.py index 134e867a6..7322944c9 100644 --- a/testplan/testing/multitest/suite.py +++ b/testplan/testing/multitest/suite.py @@ -1,18 +1,26 @@ """Multitest testsuite/testcase module.""" import collections import copy +import dataclasses import functools import inspect import itertools import types import warnings -from typing import Optional +from typing import Optional, Callable from testplan import defaults from testplan.common.utils import interface, strings from testplan.testing import tagging from . import parametrization +from .test_metadata import ( + LocationMetadata, + TestSuiteMetadata, + TestSuiteStaticMetadata, + TestCaseStaticMetadata, + TestCaseMetadata, +) # Global variables __TESTCASES__ = [] @@ -20,6 +28,10 @@ __GENERATED_TESTCASES__ = [] +TESTCASE_METADATA_ATTRIBUTE = "__testcase_metadata__" +TESTSUITE_METADATA_ATTRIBUTE = "__testsuite_metadata__" + + def _reset_globals(): # pylint: disable=global-statement global __TESTCASES__ @@ -433,6 +445,12 @@ def _testsuite(klass): # Suite resolved, clear global variables for resolving the next suite. _reset_globals() + setattr( + klass, + TESTSUITE_METADATA_ATTRIBUTE, + TestSuiteStaticMetadata(LocationMetadata.from_object(klass)), + ) + return klass @@ -567,28 +585,12 @@ def _mark_function_as_testcase(func): def _testcase(function): - """Actual decorator that validates & registers a method as a testcase.""" - global __TESTCASES__ - - # Attributes `name` and `__tags__` are added only when function is - # decorated by @testcase(...) which has the following parentheses. 
- if not hasattr(function, "name"): - _validate_function_name(function) - function.name = function.__name__ - - if not hasattr(function, "__tags__"): - function.__tags__ = {} - function.__tags_index__ = {} - - _validate_testcase(function) - _mark_function_as_testcase(function) - function.__seq_number__ = _number_of_testcases() - function.__skip__ = [] + return _testcase_meta()(function) - __TESTCASES__.append(function.__name__) - return function +def add_testcase_metadata(func: Callable, metadata: TestCaseStaticMetadata): + setattr(func, TESTCASE_METADATA_ATTRIBUTE, metadata) def _testcase_meta( @@ -613,7 +615,7 @@ def _testcase_meta( @functools.wraps(_testcase) def wrapper(function): - """Meta logic for test case goes here.""" + """Actual decorator that validates & registers a method as a testcase.""" global __TESTCASES__ global __GENERATED_TESTCASES__ global __PARAMETRIZATION_TEMPLATE__ @@ -672,6 +674,13 @@ def wrapper(function): __GENERATED_TESTCASES__.append(func) + add_testcase_metadata( + func, + TestCaseStaticMetadata( + LocationMetadata.from_object(function) + ), + ) + return function else: @@ -698,6 +707,11 @@ def wrapper(function): function = wrapper_func(function) __TESTCASES__.append(function.__name__) + + add_testcase_metadata( + function, + TestCaseStaticMetadata(LocationMetadata.from_object(function)), + ) return function return wrapper @@ -898,3 +912,34 @@ def inner(function): return function return inner + + +def get_testcase_metadata(testcase: object): + static_metadata = getattr( + testcase, + TESTCASE_METADATA_ATTRIBUTE, + ) + + return TestCaseMetadata( + **dataclasses.asdict(static_metadata), + name=testcase.name, + description=testcase.__doc__, + ) + + +def get_suite_metadata(suite: object) -> TestSuiteMetadata: + static_metadata: TestSuiteStaticMetadata = getattr( + suite, TESTSUITE_METADATA_ATTRIBUTE + ) + testcase_metadata = [ + get_testcase_metadata(tc) + for _, tc in inspect.getmembers(suite) + if hasattr(tc, TESTCASE_METADATA_ATTRIBUTE) + ] + + return TestSuiteMetadata( + **dataclasses.asdict(static_metadata), + name=suite.name, + description=get_testsuite_desc(suite), + test_cases=testcase_metadata, + ) diff --git a/testplan/testing/multitest/test_metadata.py b/testplan/testing/multitest/test_metadata.py new file mode 100644 index 000000000..1df02074c --- /dev/null +++ b/testplan/testing/multitest/test_metadata.py @@ -0,0 +1,70 @@ +from dataclasses import dataclass, field +from inspect import getsourcefile, getsourcelines +from typing import List, Optional, Union + + +@dataclass +class LocationMetadata: + + object_name: str + file: str + line_no: int + + @classmethod + def from_object(cls, obj): + object_name = obj.__name__ + file = getsourcefile(obj) + _, line_no = getsourcelines(obj) + return cls(object_name, file, line_no) + + +@dataclass +class TestCaseStaticMetadata: + location: LocationMetadata + + +@dataclass +class BasicInfo: + name: str + description: Union[str, None] + id: Union[str, None] = field(default=None, init=False) + + +@dataclass +class TestCaseMetadata(TestCaseStaticMetadata, BasicInfo): + pass + + +@dataclass +class TestSuiteStaticMetadata: + + location: LocationMetadata + + +@dataclass +class TestSuiteMetadata(TestSuiteStaticMetadata, BasicInfo): + + test_cases: List[TestCaseMetadata] + + +@dataclass +class TestMetadata(BasicInfo): + test_suites: List[TestSuiteMetadata] + + def __post_init__(self): + # computing ids propagating parent ids, this assume that the metadat is used in an + # immutable manner. 
If it is mutated later, compute_ids needs to be called to
+        # recalculate the ids of all nested suites and testcases
+        self.compute_ids()
+
+    def compute_ids(self):
+        self.id = self.name
+        for suite in self.test_suites:
+            suite.id = f"{self.id}:{suite.name}"
+            for tc in suite.test_cases:
+                tc.id = f"{suite.id}:{tc.name}"
+
+
+@dataclass
+class TestPlanMetadata(BasicInfo):
+    tests: List[TestMetadata]
diff --git a/tests/functional/testplan/runners/pools/test_pool_remote.py b/tests/functional/testplan/runners/pools/test_pool_remote.py
index 9b9e0b72d..71ae3b7be 100644
--- a/tests/functional/testplan/runners/pools/test_pool_remote.py
+++ b/tests/functional/testplan/runners/pools/test_pool_remote.py
@@ -112,5 +112,8 @@ def test_materialization_fail(mockplan):
     assert res.run is False
     assert res.success is False
     assert mockplan.report.status == Status.ERROR
-    assert mockplan.report.entries[0].name == "Task[target_raises_in_worker]"
+    assert (
+        mockplan.report.entries[0].name
+        == "Task[target_raises_in_worker(uid=MTest)]"
+    )
     assert mockplan.report.entries[0].category == Status.ERROR
diff --git a/tests/unit/testplan/testing/test_listing.py b/tests/unit/testplan/testing/test_listing.py
index 4632d0791..52c23bb56 100644
--- a/tests/unit/testplan/testing/test_listing.py
+++ b/tests/unit/testplan/testing/test_listing.py
@@ -14,11 +14,18 @@ def test_defaults():
-    assert len(listing_registry.listers) == 5
+    assert len(listing_registry.listers) == 6
     arg_enum = listing_registry.to_arg()
     assert issubclass(arg_enum, ArgMixin)
-    for enum in ["NAME", "NAME_FULL", "COUNT", "PATTERN", "PATTERN_FULL"]:
+    for enum in [
+        "NAME",
+        "NAME_FULL",
+        "COUNT",
+        "PATTERN",
+        "PATTERN_FULL",
+        "JSON",
+    ]:
         assert arg_enum[enum]
@@ -52,7 +59,8 @@ def test_help_text():
     assert match
     name, text = match.groups()
-    lister = arg_enum.parse(name)
+    lister, path = arg_enum.parse(name)
     assert lister
+    assert not path
     assert lister.description() == text
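
For reference, the new JSON lister introduced by this change can be exercised either through the CLI flag described in the newsfragment or programmatically via the new test_lister / test_lister_output options. The snippet below is an illustrative sketch only: the plan name, output path, and entry-point layout are made up, and it assumes the usual @test_plan decorator backed by main_wrapper.

    # CLI usage (per the newsfragment): print metadata to stdout, or write it to a file
    #   python test_plan.py --info json
    #   python test_plan.py --info json:/var/tmp/tests.json

    # Programmatic sketch under the same assumptions
    from testplan import test_plan
    from testplan.testing.listing import SimpleJsonLister

    @test_plan(
        name="MyPlan",                             # hypothetical plan name
        test_lister=SimpleJsonLister(),            # metadata-based lister added here
        test_lister_output="/var/tmp/tests.json",  # omit to dump the JSON to stdout
    )
    def main(plan):
        ...  # add MultiTests as usual; with a lister set they are listed, not run

    if __name__ == "__main__":
        main()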