diff --git a/.github/workflows/_codecov.yaml b/.github/workflows/_codecov.yaml
index 19b6a47..9582397 100644
--- a/.github/workflows/_codecov.yaml
+++ b/.github/workflows/_codecov.yaml
@@ -48,7 +48,7 @@ jobs:
         run: python -m pip install ".[dev,test]"
 
       - name: Generate coverage report
-        run: pytest --cov --cov-report=xml
+        run: pytest --retries 1 --cov --cov-report=xml
 
       - name: Upload coverage to Codecov
         uses: codecov/codecov-action@1e68e06f1dbfde0e4cefc87efeba9e4643565303 #v5.1.2
diff --git a/.github/workflows/_test.yaml b/.github/workflows/_test.yaml
index f886de2..870c387 100644
--- a/.github/workflows/_test.yaml
+++ b/.github/workflows/_test.yaml
@@ -33,11 +33,10 @@ jobs:
           python-version: ${{ inputs.python-version }}
 
       - name: Install dependencies
-        run: |
-          python -m pip install --user --upgrade pip
+        run: python -m pip install --user --upgrade pip
 
       - name: Install package
         run: python -m pip install --user ".[dev,test]"
 
      - name: Run unit tests
-        run: pytest -vv tests
+        run: pytest -vv --retries 1 tests
diff --git a/.github/workflows/cicd.yaml b/.github/workflows/cicd.yaml
index c7f7408..8485538 100644
--- a/.github/workflows/cicd.yaml
+++ b/.github/workflows/cicd.yaml
@@ -41,7 +41,7 @@ jobs:
       fail-fast: false
       matrix:
         os: [ubuntu-latest, macos-latest, windows-latest]
-        python-version: ["3.9", "3.10", "3.11", "3.12"]
+        python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
     with:
       os: ${{ matrix.os }}
       python-version: ${{ matrix.python-version }}
@@ -65,7 +65,7 @@ jobs:
       fail-fast: false
       matrix:
         os: [ubuntu-latest, windows-latest]
-        python-version: ["3.9", "3.10", "3.11", "3.12"]
+        python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
     with:
       os: ${{ matrix.os }}
       python-version: ${{ matrix.python-version }}
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index f964f5b..369746e 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -5,7 +5,7 @@ repos:
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.7.0
+    rev: v0.9.3
     hooks:
       - id: ruff
         args:
@@ -23,12 +23,12 @@
         #   - --install-types
         #   - --non-interactive
   - repo: https://github.com/codespell-project/codespell
-    rev: v2.3.0
+    rev: v2.4.0
     hooks:
       - id: codespell
         additional_dependencies: [tomli]
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.6.0
+    rev: v5.0.0
     hooks:
       # all available hooks can be found here: https://github.com/pre-commit/pre-commit-hooks/blob/main/.pre-commit-hooks.yaml
       - id: check-yaml
@@ -72,7 +72,7 @@
       - id: isort
         args: [--profile=black]
   - repo: https://github.com/PyCQA/bandit
-    rev: 1.7.10
+    rev: 1.8.2
     hooks:
      - id: bandit
         exclude: "^tests/.*|examples/.*"
diff --git a/Makefile b/Makefile
index 23dc535..aec323f 100644
--- a/Makefile
+++ b/Makefile
@@ -39,9 +39,16 @@ install:
 test:
 	$(PYTHON) -m pytest $(PYTEST_OPTS) $(TESTS)
 
-.PHONY: tests
+.PHONY: test
 tests: test
 
+## retest	Rerun tests that failed before
+##
+.PHONY: retest
+retest:
+	$(PYTHON) -m pytest $(PYTEST_OPTS) --lf $(TESTS)
+
+
 ## wip	Run tests marked as wip
 ##
 .PHONY: wip
diff --git a/cmethods/__init__.py b/cmethods/__init__.py
index eec4f34..b91ca6f 100644
--- a/cmethods/__init__.py
+++ b/cmethods/__init__.py
@@ -173,8 +173,8 @@ def cli(**kwargs) -> None:
         datefmt="%Y/%m/%d %H:%M:%S",
         level=logging.INFO,
     )
-
-    logging.info("Loading data sets ...")
+    log = logging.getLogger(__name__)
+    log.info("Loading data sets ...")
     try:
         for key, message in zip(
             ("obs", "simh", "simp"),
@@ -194,15 +194,15 @@
             )
             kwargs[key] = kwargs[key][kwargs["variable"]]
     except (TypeError, KeyError) as exc:
-        logging.error(exc)
+        log.error(exc)
         sys.exit(1)
 
-    logging.info("Data sets loaded ...")
+    log.info("Data sets loaded ...")
     kwargs["n_quantiles"] = kwargs["quantiles"]
     del kwargs["quantiles"]
 
-    logging.info("Applying %s ..." % kwargs["method"])
+    log.info("Applying %s ...", kwargs["method"])
     result = adjust(**kwargs)
 
-    logging.info("Saving result to %s ..." % kwargs["output"])
+    log.info("Saving result to %s ...", kwargs["output"])
     result.to_netcdf(kwargs["output"])
diff --git a/cmethods/core.py b/cmethods/core.py
index 8364f33..84afc01 100644
--- a/cmethods/core.py
+++ b/cmethods/core.py
@@ -21,7 +21,7 @@ from cmethods.scaling import linear_scaling as __linear_scaling
 from cmethods.scaling import variance_scaling as __variance_scaling
 from cmethods.static import SCALING_METHODS
-from cmethods.utils import UnknownMethodError, check_xr_types
+from cmethods.utils import UnknownMethodError, ensure_xr_dataarray
 
 if TYPE_CHECKING:
     from cmethods.types import XRData
 
@@ -37,16 +37,16 @@ def apply_ufunc(
     method: str,
-    obs: XRData,
-    simh: XRData,
-    simp: XRData,
+    obs: xr.DataArray,
+    simh: xr.DataArray,
+    simp: xr.DataArray,
     **kwargs: dict,
-) -> XRData:
+) -> xr.DataArray:
     """
     Internal function used to apply the bias correction technique to the
     passed input data.
     """
-    check_xr_types(obs=obs, simh=simh, simp=simp)
+    ensure_xr_dataarray(obs=obs, simh=simh, simp=simp)
 
     if method not in __METHODS_FUNC__:
         raise UnknownMethodError(method, __METHODS_FUNC__.keys())
@@ -96,11 +96,11 @@
 
 def adjust(
     method: str,
-    obs: XRData,
-    simh: XRData,
-    simp: XRData,
+    obs: xr.DataArray,
+    simh: xr.DataArray,
+    simp: xr.DataArray,
     **kwargs,
-) -> XRData:
+) -> xr.DataArray | xr.Dataset:
     """
     Function to apply a bias correction technique on single and multidimensional
     data sets. For more information please refer to the method specific
@@ -119,19 +119,19 @@
     :param method: Technique to apply
     :type method: str
     :param obs: The reference/observational data set
-    :type obs: XRData
+    :type obs: xr.DataArray
     :param simh: The modeled data of the control period
-    :type simh: XRData
+    :type simh: xr.DataArray
     :param simp: The modeled data of the period to adjust
-    :type simp: XRData
+    :type simp: xr.DataArray
     :param kwargs: Any other method-specific parameter (like
         ``n_quantiles`` and ``kind``)
     :type kwargs: dict
     :return: The bias corrected/adjusted data set
-    :rtype: XRData
+    :rtype: xr.DataArray | xr.Dataset
     """
     kwargs["adjust_called"] = True
-    check_xr_types(obs=obs, simh=simh, simp=simp)
+    ensure_xr_dataarray(obs=obs, simh=simh, simp=simp)
 
     if method == "detrended_quantile_mapping":  # noqa: PLR2004
         raise ValueError(
@@ -169,6 +169,8 @@
         obs_group = group["obs"]
         simh_group = group["simh"]
         simp_group = group["simp"]
+    else:
+        raise ValueError("'group' must be a string or a dict!")
 
     del kwargs["group"]
diff --git a/cmethods/utils.py b/cmethods/utils.py
index 8e861e6..19d687b 100644
--- a/cmethods/utils.py
+++ b/cmethods/utils.py
@@ -51,11 +51,11 @@ def check_adjust_called(
         )
 
 
-def check_xr_types(obs: XRData, simh: XRData, simp: XRData) -> None:
+def ensure_xr_dataarray(obs: XRData, simh: XRData, simp: XRData) -> None:
     """
     Checks if the parameters are in the correct type. **only used internally**
     """
-    phrase: str = "must be type xarray.core.dataarray.Dataset or xarray.core.dataarray.DataArray"
+    phrase: str = "must be type 'xarray.core.dataarray.DataArray'."
 
     if not isinstance(obs, XRData_t):
         raise TypeError(f"'obs' {phrase}")
@@ -73,7 +73,7 @@ def check_np_types(
     """
     Checks if the parameters are in the correct type. **only used internally**
     """
-    phrase: str = "must be type list, np.ndarray or np.generic"
+    phrase: str = "must be type list, np.ndarray, or np.generic"
 
     if not isinstance(obs, NPData_t):
         raise TypeError(f"'obs' {phrase}")
@@ -246,8 +246,8 @@ def get_adjusted_scaling_factor(
     "UnknownMethodError",
     "check_adjust_called",
     "check_np_types",
-    "check_xr_types",
     "ensure_dividable",
+    "ensure_xr_dataarray",
     "get_adjusted_scaling_factor",
     "get_cdf",
     "get_inverse_of_cdf",
diff --git a/pyproject.toml b/pyproject.toml
index 9e04e85..6ec1869 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -39,8 +39,11 @@ keywords = [
 classifiers = [
     "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
     "Programming Language :: Python",
+    "Programming Language :: Python :: 3.9",
+    "Programming Language :: Python :: 3.10",
     "Programming Language :: Python :: 3.11",
     "Programming Language :: Python :: 3.12",
+    "Programming Language :: Python :: 3.13",
     "Topic :: Software Development :: Libraries :: Python Modules",
     "Topic :: Utilities",
     "Topic :: Scientific/Engineering",
@@ -99,7 +102,7 @@ dev = [
     # linting
     "pylint",
     "flake8",
-    "ruff==0.3.5",
+    "ruff",
     # typing
     "mypy",
 ]
@@ -107,12 +110,13 @@ test = [
     # testing
     "pytest",
     "pytest-cov",
+    "pytest-retry",
     "zarr",
     "dask[distributed]",
     "scikit-learn",
     "scipy",
 ]
-examples = ["click", "matplotlib"]
+examples = ["matplotlib"]
 
 [tool.codespell]
 check-filenames = true
diff --git a/tests/test_misc.py b/tests/test_misc.py
index 70e4994..3f793c4 100644
--- a/tests/test_misc.py
+++ b/tests/test_misc.py
@@ -102,7 +102,7 @@ def test_not_implemented_errors(
 def test_adjust_failing_dqm(datasets: dict) -> None:
     with pytest.raises(
         ValueError,
-        match="This function is not available for detrended quantile mapping. "
+        match=r"This function is not available for detrended quantile mapping. "
         "Please use cmethods.CMethods.detrended_quantile_mapping",
     ):
         adjust(
@@ -118,7 +118,7 @@ def test_adjust_failing_dqm(datasets: dict) -> None:
 def test_adjust_failing_no_group_for_distribution(datasets: dict) -> None:
     with pytest.raises(
         ValueError,
-        match="Can't use group for distribution based methods.",
+        match=r"Can't use group for distribution based methods.",
     ):
         adjust(
             method="quantile_mapping",
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 585a911..c915296 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -25,8 +25,8 @@ from cmethods.static import MAX_SCALING_FACTOR
 from cmethods.utils import (
     check_np_types,
-    check_xr_types,
     ensure_dividable,
+    ensure_xr_dataarray,
     get_adjusted_scaling_factor,
     get_pdf,
     nan_or_equal,
@@ -133,7 +133,7 @@ def test_xr_type_check() -> None:
     correct. No error should occur.
     """
     ds: xr.core.dataarray.Dataset = xr.core.dataarray.Dataset()
-    check_xr_types(obs=ds, simh=ds, simp=ds)
+    ensure_xr_dataarray(obs=ds, simh=ds, simp=ds)
 
 
 def test_type_check_failing() -> None:
     """
     Checks if the type check fails if the parameters don't
     have the correct type.
""" - phrase: str = "must be type list, np.ndarray or np.generic" + phrase: str = "must be type list, np.ndarray, or np.generic" with pytest.raises(TypeError, match=f"'obs' {phrase}"): check_np_types(obs=1, simh=[], simp=[]) @@ -177,7 +177,7 @@ def test_detrended_quantile_mapping_type_check_simp_failing(datasets: dict) -> N """n_quantiles must by type int""" with pytest.raises( TypeError, - match="'simp' must be type xarray.core.dataarray.DataArray", + match=r"'simp' must be type xarray.core.dataarray.DataArray", ): detrended_quantile_mapping( # type: ignore[attr-defined] obs=datasets["+"]["obsh"][:, 0, 0], @@ -222,7 +222,7 @@ def test_adjust_type_checking_failing() -> None: ) with pytest.raises( TypeError, - match="'obs' must be type xarray.core.dataarray.Dataset or xarray.core.dataarray.DataArray", + match=r"'obs' must be type 'xarray.core.dataarray.DataArray'.", ): adjust( method="linear_scaling", @@ -233,7 +233,7 @@ def test_adjust_type_checking_failing() -> None: ) with pytest.raises( TypeError, - match="'simh' must be type xarray.core.dataarray.Dataset or xarray.core.dataarray.DataArray", + match=r"'simh' must be type 'xarray.core.dataarray.DataArray'.", ): adjust( method="linear_scaling", @@ -245,7 +245,7 @@ def test_adjust_type_checking_failing() -> None: with pytest.raises( TypeError, - match="'simp' must be type xarray.core.dataarray.Dataset or xarray.core.dataarray.DataArray", + match=r"'simp' must be type 'xarray.core.dataarray.DataArray'.", ): adjust( method="linear_scaling",