diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 00000000..68ecdd76
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,4 @@
+[report]
+exclude_lines =
+    pragma: no cover
+    if\s+(typing\.)?TYPE_CHECKING:
diff --git a/.github/linters/.ruff.toml b/.github/linters/.ruff.toml
index ec84ebfc..07f6ed60 100644
--- a/.github/linters/.ruff.toml
+++ b/.github/linters/.ruff.toml
@@ -59,6 +59,8 @@ ignore = [
     "FBT002",
     # builtin-attribute-shadowing (not an issue)
     "A003",
+    # implicit-return (can add a return even though all cases are covered)
+    "RET503",
     # superfluous-else-return (sometimes it's more readable)
     "RET505",
     # superfluous-else-raise (sometimes it's more readable)
diff --git a/.github/workflows/issue.yml b/.github/workflows/issue.yml
index 6d207b6f..fcd4fe6e 100644
--- a/.github/workflows/issue.yml
+++ b/.github/workflows/issue.yml
@@ -9,7 +9,7 @@ jobs:
   add-to-project:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/add-to-project@v0.4.1
+      - uses: actions/add-to-project@v0.5.0
         with:
           project-url: https://github.com/orgs/Safe-DS/projects/4
           github-token: ${{ secrets.ADD_TO_PROJECT_PAT }}
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 32ea80eb..cf9b82cf 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -18,3 +18,5 @@ jobs:
       python-version: ${{ matrix.python-version }}
       module-name: library_analyzer
       coverage: ${{ matrix.python-version == '3.10' }}
+    secrets:
+      CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
diff --git a/.github/workflows/megalinter.yml b/.github/workflows/megalinter.yml
index ba3452f1..599a7e87 100644
--- a/.github/workflows/megalinter.yml
+++ b/.github/workflows/megalinter.yml
@@ -2,12 +2,14 @@ name: MegaLinter
 
 on:
   pull_request:
-    branches: [main]
-  merge_group:
+    branches: [ main ]
 
 jobs:
   megalinter:
-    if: ${{ github.event_name == 'pull_request' }}
     uses: lars-reimann/.github/.github/workflows/megalinter-reusable.yml@main
+    permissions:
+      contents: write
+      issues: write
+      pull-requests: write
     secrets:
       PAT: ${{ secrets.PAT }}
diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml
index 2faa3b36..7d0ac2c3 100644
--- a/.github/workflows/pr.yml
+++ b/.github/workflows/pr.yml
@@ -22,3 +22,5 @@ jobs:
       python-version: ${{ matrix.python-version }}
       module-name: library_analyzer
       coverage: ${{ matrix.python-version == '3.10' }}
+    secrets:
+      CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
diff --git a/.gitignore b/.gitignore
index eab7fca5..82e81014 100644
--- a/.gitignore
+++ b/.gitignore
@@ -37,7 +37,7 @@ coverage.xml
 
 # mkdocs
 /site/
-/docs/reference/safeds/
+/docs/reference/library_analyzer/
 /docs/reference/SUMMARY.md
 
 # MegaLinter
@@ -45,5 +45,5 @@ report/
 megalinter-reports/
 
 # Other
-.DS_Store/
+.DS_Store
 *.log
diff --git a/.mega-linter.yml b/.mega-linter.yml
index f1e96608..87d02282 100644
--- a/.mega-linter.yml
+++ b/.mega-linter.yml
@@ -17,5 +17,4 @@ JSON_PRETTIER_FILE_EXTENSIONS:
 
 # Commands
 PRE_COMMANDS:
-  - command: npm install @lars-reimann/prettier-config
-    cwd: workspace
+  - command: npm i @lars-reimann/prettier-config
diff --git a/docs/README.md b/docs/README.md
index 78343111..d666698d 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -3,14 +3,10 @@
 [![PyPI](https://img.shields.io/pypi/v/library-analyzer)](https://pypi.org/project/library-analyzer)
 [![Main](https://github.com/Safe-DS/Library-Analyzer/actions/workflows/main.yml/badge.svg)](https://github.com/Safe-DS/Library-Analyzer/actions/workflows/main.yml)
 [![codecov](https://codecov.io/gh/Safe-DS/Library-Analyzer/branch/main/graph/badge.svg?token=UyCUY59HKM)](https://codecov.io/gh/Safe-DS/Library-Analyzer)
-[![Documentation Status](https://readthedocs.org/projects/library-analyzer/badge/?version=stable)](https://library-analyzer.safe-ds.com)
+[![Documentation Status](https://readthedocs.org/projects/library-analyzer/badge/?version=stable)](https://library-analyzer.safeds.com)
 
 Analysis of Python libraries and code that uses them.
 
-## Documentation
-
-You can find the full documentation [here](https://library-analyzer.safe-ds.com).
-
 ## Installation
 
 Get the latest version from [PyPI](https://pypi.org/project/library-analyzer):
@@ -19,6 +15,10 @@ Get the latest version from [PyPI](https://pypi.org/project/library-analyzer):
 pip install library-analyzer
 ```
 
+## Documentation
+
+You can find the full documentation [here](https://library-analyzer.safeds.com).
+
 ## Example usage
 
 1. Analyze the API of a library:
@@ -37,3 +37,14 @@
 ```shell
 analyze-library migrate -a1 data/api/scikit-learn_v0.24.2_api.json -a2 data/api/sklearn__apiv2.json -a data/annotations/annotations.json -o out
 ```
+
+## Contributing
+
+We welcome contributions from everyone. As a starting point, check the following resources:
+
+* [Setting up a development environment](https://library-analyzer.safeds.com/en/latest/development/environment/)
+* [Contributing page](https://github.com/Safe-DS/Library-Analyzer/contribute)
+
+If you need further help, please [use our discussion forum][forum].
+
+[forum]: https://github.com/orgs/Safe-DS/discussions
diff --git a/docs/development/environment.md b/docs/development/environment.md
index a4353a46..fb2a62b3 100644
--- a/docs/development/environment.md
+++ b/docs/development/environment.md
@@ -2,29 +2,88 @@
 
 This document describes how to configure and use your development environment.
 
-!!! note
-
-    All terminal commands listed below are assumed to be run from the root of the repository.
+## Prerequisites
 
-## Initial setup
+You must complete these steps once before you can start setting up the project itself:
 
 1. Install [Python 3.10](https://www.python.org/downloads/).
-2. Install [poetry](https://python-poetry.org/docs/master/#installation).
-3. Install dependencies of this project by running this command:
+2. Verify that `python` can be launched by running this command in a **new** terminal:
    ```shell
-   poetry install
+   python --version
    ```
+   If this fails, add the directory that contains the `python` executable to your `PATH` environment variable.
-
-## Running the tests
-
-1. Run this command:
+3. Install [Poetry](https://python-poetry.org/docs/master/#installing-with-the-official-installer) with the official installer. Follow the instructions for your operating system in the linked document.
+4. Verify that `poetry` can be launched by running this command in a **new** terminal:
    ```shell
-   poetry run pytest
+   poetry --version
    ```
+   If this fails, add the directory that contains the `poetry` executable to your `PATH` environment variable.
+
+## Project setup
+
+Follow the instructions for your preferred IDE. If you want to use neither [PyCharm](https://www.jetbrains.com/pycharm/) nor [Visual Studio Code](https://code.visualstudio.com/), use the generic instructions. You only need to do these steps once.
+
+!!! note
+
+    All terminal commands listed in this section are assumed to be run from the root of the repository.
+
+=== "PyCharm"
+
+    1. Clone the repository.
+    2. Open the project folder in PyCharm.
+    3. Follow the instructions in the [PyCharm documentation](https://www.jetbrains.com/help/pycharm/poetry.html#poetry-env) to create a **new** Poetry environment and to install the dependencies of the project.
+    4. Open the PyCharm settings and search for "Python Integrated Tools". Set the "Default test runner" to "pytest" and the "Docstring format" to "NumPy". Your settings should look like this:
+       ![PyCharm settings "Python Integrated Tools"](./img/pycharm_python_integrated_tools.png)
+
+=== "Visual Studio Code"
+
+    1. Clone the repository.
+    2. Open the project folder in Visual Studio Code.
+    3. Install the [Python extension](https://marketplace.visualstudio.com/items?itemName=ms-python.python).
+    4. Create a new Poetry environment and install the dependencies of the project by running this command:
+       ```shell
+       poetry install
+       ```
+    5. Find the path to the Poetry environment that was created in step 4 by running this command:
+       ```shell
+       poetry env info --path
+       ```
+    6. Copy the output of step 5 to your clipboard.
+    7. Open the command palette and search for "Python: Select Interpreter".
+    8. Select the Poetry environment that matches the output of step 5. It should show up in the list of available interpreters. If it does not, you can pick it manually by choosing "Enter interpreter path..." and pasting the path that you copied in step 6 into the input field.
+    9. Open the command palette and search for "Python: Configure Tests".
+    10. Select "pytest" as the test runner.
+    11. Select "tests" as the directory containing tests.
+
+=== "Generic"
+
+    1. Clone the repository.
+    2. Create a new Poetry environment and install the dependencies of the project by running this command:
+       ```shell
+       poetry install
+       ```
+
+## Running the tests
+
+=== "PyCharm"
+
+    Right-click the `tests` directory in the [Project tool window](https://www.jetbrains.com/help/pycharm/project-tool-window.html) and select "Run 'pytest in tests'".
+
+=== "Visual Studio Code"
+
+    Run the tests by opening the command palette and searching for "Test: Run All Tests".
+
+=== "Generic"
+
+    Run this command from the root of the repository:
+    ```shell
+    poetry run pytest
+    ```
 
 ## Serving the documentation
 
-1. Start the server by running this command:
+1. Start the server by running this command from the root of the repository:
    ```shell
    poetry run mkdocs serve
    ```
diff --git a/docs/development/img/pycharm_python_integrated_tools.png b/docs/development/img/pycharm_python_integrated_tools.png
new file mode 100644
index 00000000..44424827
Binary files /dev/null and b/docs/development/img/pycharm_python_integrated_tools.png differ
diff --git a/mkdocs.yml b/mkdocs.yml
index f425fe05..cfb117a8 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -38,9 +38,11 @@ theme:
   features:
     - content.code.copy
     - navigation.tabs
+    - content.tabs.link
     - navigation.indexes
    - navigation.instant
     - navigation.sections
+    - navigation.tabs
     - navigation.top
 
 plugins:
@@ -119,6 +121,10 @@ markdown_extensions:
   - toc:
       permalink: true
 
+  # Tabs
+  - pymdownx.tabbed:
+      alternate_style: true
+
 extra_javascript:
   - javascript/mathjax.js
   - https://polyfill.io/v3/polyfill.min.js?features=es6
diff --git a/package-lock.json b/package-lock.json
index 008fbcba..3c59a8f6 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -12,8 +12,8 @@
         "@semantic-release/changelog": "^6.0.3",
         "@semantic-release/exec": "^6.0.3",
         "@semantic-release/git": "^10.0.1",
-        "conventional-changelog-conventionalcommits": "^5.0.0",
-        "semantic-release": "^21.0.0"
+        "conventional-changelog-conventionalcommits": "^6.1.0",
+        "semantic-release": "^21.0.6"
       }
     },
     "node_modules/@babel/code-frame": {
@@ -177,21 +177,18 @@
       }
     },
     "node_modules/@octokit/auth-token": {
-      "version": "3.0.3",
-      "resolved": "https://registry.npmjs.org/@octokit/auth-token/-/auth-token-3.0.3.tgz",
-      "integrity": "sha512-/aFM2M4HVDBT/jjDBa84sJniv1t9Gm/rLkalaz9htOm+L+8JMj1k9w0CkUdcxNyNxZPlTxKPVko+m1VlM58ZVA==",
+      "version": "3.0.4",
+      "resolved": "https://registry.npmjs.org/@octokit/auth-token/-/auth-token-3.0.4.tgz",
+      "integrity": "sha512-TWFX7cZF2LXoCvdmJWY7XVPi74aSY0+FfBZNSXEXFkMpjcqsQwDSYVv5FhRFaI0V1ECnwbz4j59T/G+rXNWaIQ==",
       "dev": true,
-      "dependencies": {
-        "@octokit/types": "^9.0.0"
-      },
       "engines": {
         "node": ">= 14"
       }
     },
     "node_modules/@octokit/core": {
-      "version": "4.2.0",
-      "resolved": "https://registry.npmjs.org/@octokit/core/-/core-4.2.0.tgz",
-      "integrity": "sha512-AgvDRUg3COpR82P7PBdGZF/NNqGmtMq2NiPqeSsDIeCfYFOZ9gddqWNQHnFdEUf+YwOj4aZYmJnlPp7OXmDIDg==",
+      "version": "4.2.4",
+      "resolved": "https://registry.npmjs.org/@octokit/core/-/core-4.2.4.tgz",
+      "integrity": "sha512-rYKilwgzQ7/imScn3M9/pFfUf4I1AZEH3KhyJmtPdE2zfaXAn2mFfUy4FbKewzc2We5y/LlKLj36fWJLKC2SIQ==",
       "dev": true,
       "dependencies": {
         "@octokit/auth-token": "^3.0.0",
@@ -207,9 +204,9 @@
       }
     },
     "node_modules/@octokit/endpoint": {
-      "version": "7.0.5",
-      "resolved": "https://registry.npmjs.org/@octokit/endpoint/-/endpoint-7.0.5.tgz",
-      "integrity": "sha512-LG4o4HMY1Xoaec87IqQ41TQ+glvIeTKqfjkCEmt5AIwDZJwQeVZFIEYXrYY6yLwK+pAScb9Gj4q+Nz2qSw1roA==",
+      "version": "7.0.6",
+      "resolved": "https://registry.npmjs.org/@octokit/endpoint/-/endpoint-7.0.6.tgz",
+      "integrity": "sha512-5L4fseVRUsDFGR00tMWD/Trdeeihn999rTMGRMC1G/Ldi1uWlWJzI98H4Iak5DB/RVvQuyMYKqSK/R6mbSOQyg==",
       "dev": true,
       "dependencies": {
         "@octokit/types": "^9.0.0",
@@ -221,9 +218,9 @@
       }
     },
     "node_modules/@octokit/graphql": {
-      "version": "5.0.5",
-      "resolved": "https://registry.npmjs.org/@octokit/graphql/-/graphql-5.0.5.tgz",
-      "integrity": "sha512-Qwfvh3xdqKtIznjX9lz2D458r7dJPP8l6r4GQkIdWQouZwHQK0mVT88uwiU2bdTU2OtT1uOlKpRciUWldpG0yQ==",
+      "version": "5.0.6",
+      "resolved": "https://registry.npmjs.org/@octokit/graphql/-/graphql-5.0.6.tgz",
+      "integrity": "sha512-Fxyxdy/JH0MnIB5h+UQ3yCoh1FG4kWXfFKkpWqjZHw/p+Kc8Y44Hu/kCgNBT6nU1shNumEchmW/sUO1JuQnPcw==",
       "dev": true,
       "dependencies": {
         "@octokit/request": "^6.0.0",
@@ -235,55 +232,87 @@
       }
     },
     "node_modules/@octokit/openapi-types": {
-      "version": "16.0.0",
-      "resolved": "https://registry.npmjs.org/@octokit/openapi-types/-/openapi-types-16.0.0.tgz",
-      "integrity": "sha512-JbFWOqTJVLHZSUUoF4FzAZKYtqdxWu9Z5m2QQnOyEa04fOFljvyh7D3GYKbfuaSWisqehImiVIMG4eyJeP5VEA==",
+      "version": "18.0.0",
+      "resolved": "https://registry.npmjs.org/@octokit/openapi-types/-/openapi-types-18.0.0.tgz",
+      "integrity": "sha512-V8GImKs3TeQRxRtXFpG2wl19V7444NIOTDF24AWuIbmNaNYOQMWRbjcGDXV5B+0n887fgDcuMNOmlul+k+oJtw==",
       "dev": true
     },
     "node_modules/@octokit/plugin-paginate-rest": {
-      "version": "6.0.0",
-      "resolved": "https://registry.npmjs.org/@octokit/plugin-paginate-rest/-/plugin-paginate-rest-6.0.0.tgz",
-      "integrity": "sha512-Sq5VU1PfT6/JyuXPyt04KZNVsFOSBaYOAq2QRZUwzVlI10KFvcbUo8lR258AAQL1Et60b0WuVik+zOWKLuDZxw==",
+      "version": "7.1.2",
+      "resolved": "https://registry.npmjs.org/@octokit/plugin-paginate-rest/-/plugin-paginate-rest-7.1.2.tgz",
+      "integrity": "sha512-Jx8KuKqEAVRsK6fMzZKv3h6UH9/NRDHsDRtUAROqqmZlCptM///Uef7A1ViZ/cbDplekz7VbDWdFLAZ/mpuDww==",
       "dev": true,
       "dependencies": {
-        "@octokit/types": "^9.0.0"
+        "@octokit/tsconfig": "^2.0.0",
+        "@octokit/types": "^9.3.2"
       },
       "engines": {
-        "node": ">= 14"
+        "node": ">= 18"
       },
       "peerDependencies": {
         "@octokit/core": ">=4"
       }
     },
-    "node_modules/@octokit/plugin-request-log": {
-      "version": "1.0.4",
-      "resolved": "https://registry.npmjs.org/@octokit/plugin-request-log/-/plugin-request-log-1.0.4.tgz",
-      "integrity": "sha512-mLUsMkgP7K/cnFEw07kWqXGF5LKrOkD+lhCrKvPHXWDywAwuDUeDwWBpc69XK3pNX0uKiVt8g5z96PJ6z9xCFA==",
+    "node_modules/@octokit/plugin-retry": {
+      "version": "5.0.4",
+      "resolved": "https://registry.npmjs.org/@octokit/plugin-retry/-/plugin-retry-5.0.4.tgz",
+      "integrity": "sha512-hw00fDIhOgijy4aSxS6weWF5uqZVeoiC/AptLLyjL8KFCJRGRaXfcfgj76h/Z3cSLTjRsEIQnNCTig8INttL/g==",
       "dev": true,
+      "dependencies": {
+        "@octokit/request-error": "^4.0.1",
+        "@octokit/types": "^10.0.0",
+        "bottleneck": "^2.15.3"
+      },
+      "engines": {
+        "node": ">= 18"
+      },
       "peerDependencies": {
         "@octokit/core": ">=3"
       }
     },
-    "node_modules/@octokit/plugin-rest-endpoint-methods": {
-      "version": "7.0.1",
-      "resolved": "https://registry.npmjs.org/@octokit/plugin-rest-endpoint-methods/-/plugin-rest-endpoint-methods-7.0.1.tgz",
-      "integrity": "sha512-pnCaLwZBudK5xCdrR823xHGNgqOzRnJ/mpC/76YPpNP7DybdsJtP7mdOwh+wYZxK5jqeQuhu59ogMI4NRlBUvA==",
+    "node_modules/@octokit/plugin-retry/node_modules/@octokit/request-error": {
+      "version": "4.0.2",
+      "resolved": "https://registry.npmjs.org/@octokit/request-error/-/request-error-4.0.2.tgz",
+      "integrity": "sha512-uqwUEmZw3x4I9DGYq9fODVAAvcLsPQv97NRycP6syEFu5916M189VnNBW2zANNwqg3OiligNcAey7P0SET843w==",
+      "dev": true,
+      "dependencies": {
+        "@octokit/types": "^10.0.0",
+        "deprecation": "^2.0.0",
+        "once": "^1.4.0"
+      },
+      "engines": {
+        "node": ">= 18"
+      }
+    },
+    "node_modules/@octokit/plugin-retry/node_modules/@octokit/types": {
+      "version": "10.0.0",
+      "resolved": "https://registry.npmjs.org/@octokit/types/-/types-10.0.0.tgz",
+      "integrity": "sha512-Vm8IddVmhCgU1fxC1eyinpwqzXPEYu0NrYzD3YZjlGjyftdLBTeqNblRC0jmJmgxbJIsQlyogVeGnrNaaMVzIg==",
+      "dev": true,
+      "dependencies": {
+        "@octokit/openapi-types": "^18.0.0"
+      }
+    },
+    "node_modules/@octokit/plugin-throttling": {
+      "version": "6.1.0",
+      "resolved": "https://registry.npmjs.org/@octokit/plugin-throttling/-/plugin-throttling-6.1.0.tgz",
+      "integrity": "sha512-JqMbTiPC0sUSTsLQsdq3JVx1mx8UtTo5mwR80YqPXE93+XhevvSyOR1rO2Z+NbO/r0TK4hqFJSSi/9oIZBxZTg==",
       "dev": true,
       "dependencies": {
         "@octokit/types": "^9.0.0",
-        "deprecation": "^2.3.1"
+        "bottleneck": "^2.15.3"
       },
       "engines": {
-        "node": ">= 14"
+        "node": ">= 18"
       },
       "peerDependencies": {
-        "@octokit/core": ">=3"
+        "@octokit/core": "^4.0.0"
       }
     },
     "node_modules/@octokit/request": {
-      "version": "6.2.3",
-      "resolved": "https://registry.npmjs.org/@octokit/request/-/request-6.2.3.tgz",
-      "integrity": "sha512-TNAodj5yNzrrZ/VxP+H5HiYaZep0H3GU0O7PaF+fhDrt8FPrnkei9Aal/txsN/1P7V3CPiThG0tIvpPDYUsyAA==",
+      "version": "6.2.8",
+      "resolved": "https://registry.npmjs.org/@octokit/request/-/request-6.2.8.tgz",
+      "integrity": "sha512-ow4+pkVQ+6XVVsekSYBzJC0VTVvh/FCTUUgTsboGq+DTeWdyIFV8WSCdo0RIxk6wSkBTHqIK1mYuY7nOBXOchw==",
       "dev": true,
       "dependencies": {
         "@octokit/endpoint": "^7.0.0",
@@ -311,28 +340,19 @@
         "node": ">= 14"
       }
     },
-    "node_modules/@octokit/rest": {
-      "version": "19.0.7",
-      "resolved": "https://registry.npmjs.org/@octokit/rest/-/rest-19.0.7.tgz",
-      "integrity": "sha512-HRtSfjrWmWVNp2uAkEpQnuGMJsu/+dBr47dRc5QVgsCbnIc1+GFEaoKBWkYG+zjrsHpSqcAElMio+n10c0b5JA==",
-      "dev": true,
-      "dependencies": {
-        "@octokit/core": "^4.1.0",
-        "@octokit/plugin-paginate-rest": "^6.0.0",
-        "@octokit/plugin-request-log": "^1.0.4",
-        "@octokit/plugin-rest-endpoint-methods": "^7.0.0"
-      },
-      "engines": {
-        "node": ">= 14"
-      }
+    "node_modules/@octokit/tsconfig": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/@octokit/tsconfig/-/tsconfig-2.0.0.tgz",
+      "integrity": "sha512-tWnrai3quGt8+gRN2edzo9fmraWekeryXPeXDomMw2oFSpu/lH3VSWGn/q4V+rwjTRMeeXk/ci623/01Zet4VQ==",
+      "dev": true
     },
     "node_modules/@octokit/types": {
-      "version": "9.0.0",
-      "resolved": "https://registry.npmjs.org/@octokit/types/-/types-9.0.0.tgz",
-      "integrity": "sha512-LUewfj94xCMH2rbD5YJ+6AQ4AVjFYTgpp6rboWM5T7N3IsIF65SBEOVcYMGAEzO/kKNiNaW4LoWtoThOhH06gw==",
+      "version": "9.3.2",
+      "resolved": "https://registry.npmjs.org/@octokit/types/-/types-9.3.2.tgz",
+      "integrity": "sha512-D4iHGTdAnEEVsB8fl95m1hiz7D5YiRdQ9b/OEb3BYRVwbLsGHcRVPz+u+BgRLNk0Q0/4iZCBqDN96j2XNxfXrA==",
       "dev": true,
       "dependencies": {
-        "@octokit/openapi-types": "^16.0.0"
+        "@octokit/openapi-types": "^18.0.0"
       }
     },
     "node_modules/@pnpm/config.env-replace": {
@@ -420,24 +440,67 @@
       }
     },
     "node_modules/@semantic-release/commit-analyzer": {
-      "version": "9.0.2",
-      "resolved": "https://registry.npmjs.org/@semantic-release/commit-analyzer/-/commit-analyzer-9.0.2.tgz",
-      "integrity": "sha512-E+dr6L+xIHZkX4zNMe6Rnwg4YQrWNXK+rNsvwOPpdFppvZO1olE2fIgWhv89TkQErygevbjsZFSIxp+u6w2e5g==",
+      "version": "10.0.1",
+      "resolved": "https://registry.npmjs.org/@semantic-release/commit-analyzer/-/commit-analyzer-10.0.1.tgz",
+      "integrity": "sha512-9ejHzTAijYs9z246sY/dKBatmOPcd0GQ7lH4MgLCkv1q4GCiDZRkjHJkaQZXZVaK7mJybS+sH3Ng6G8i3pYMGQ==",
       "dev": true,
       "dependencies": {
-        "conventional-changelog-angular": "^5.0.0",
-        "conventional-commits-filter": "^2.0.0",
-        "conventional-commits-parser": "^3.2.3",
+        "conventional-changelog-angular": "^6.0.0",
+        "conventional-commits-filter": "^3.0.0",
+        "conventional-commits-parser": "^4.0.0",
         "debug": "^4.0.0",
         "import-from": "^4.0.0",
-        "lodash": "^4.17.4",
+        "lodash-es": "^4.17.21",
        "micromatch": "^4.0.2"
       },
       "engines": {
-        "node": ">=14.17"
+        "node": ">=18"
       },
       "peerDependencies": {
-        "semantic-release": ">=18.0.0-beta.1"
+        "semantic-release": ">=20.1.0"
+      }
+    },
+    "node_modules/@semantic-release/commit-analyzer/node_modules/conventional-changelog-angular": {
+      "version": "6.0.0",
+      "resolved": "https://registry.npmjs.org/conventional-changelog-angular/-/conventional-changelog-angular-6.0.0.tgz",
+      "integrity": "sha512-6qLgrBF4gueoC7AFVHu51nHL9pF9FRjXrH+ceVf7WmAfH3gs+gEYOkvxhjMPjZu57I4AGUGoNTY8V7Hrgf1uqg==",
+      "dev": true,
+      "dependencies": {
+        "compare-func": "^2.0.0"
+      },
+      "engines": {
+        "node": ">=14"
+      }
+    },
+    "node_modules/@semantic-release/commit-analyzer/node_modules/conventional-commits-filter": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/conventional-commits-filter/-/conventional-commits-filter-3.0.0.tgz",
+      "integrity": "sha512-1ymej8b5LouPx9Ox0Dw/qAO2dVdfpRFq28e5Y0jJEU8ZrLdy0vOSkkIInwmxErFGhg6SALro60ZrwYFVTUDo4Q==",
+      "dev": true,
+      "dependencies": {
+        "lodash.ismatch": "^4.4.0",
+        "modify-values": "^1.0.1"
+      },
+      "engines": {
+        "node": ">=14"
+      }
+    },
+    "node_modules/@semantic-release/commit-analyzer/node_modules/conventional-commits-parser": {
+      "version": "4.0.0",
+      "resolved": "https://registry.npmjs.org/conventional-commits-parser/-/conventional-commits-parser-4.0.0.tgz",
+      "integrity": "sha512-WRv5j1FsVM5FISJkoYMR6tPk07fkKT0UodruX4je86V4owk451yjXAKzKAPOs9l7y59E2viHUS9eQ+dfUA9NSg==",
+      "dev": true,
+      "dependencies": {
+        "is-text-path": "^1.0.1",
+        "JSONStream": "^1.3.5",
+        "meow": "^8.1.2",
+        "split2": "^3.2.2"
+      },
+      "bin": {
+        "conventional-commits-parser": "cli.js"
+      },
+      "engines": {
+        "node": ">=14"
       }
     },
     "node_modules/@semantic-release/error": {
@@ -741,64 +804,42 @@
       }
     },
     "node_modules/@semantic-release/github": {
-      "version": "8.0.7",
-      "resolved": "https://registry.npmjs.org/@semantic-release/github/-/github-8.0.7.tgz",
-      "integrity": "sha512-VtgicRIKGvmTHwm//iqTh/5NGQwsncOMR5vQK9pMT92Aem7dv37JFKKRuulUsAnUOIlO4G8wH3gPiBAA0iW0ww==",
+      "version": "9.0.3",
+      "resolved": "https://registry.npmjs.org/@semantic-release/github/-/github-9.0.3.tgz",
+      "integrity": "sha512-X6gq4USKVlCxPwIIyXb99jU7gwVWlnsKOevs+OyABRdoqc+OIRITbFmrrYU3eE1vGMGk+Qu/GAoLUQQQwC3YOA==",
       "dev": true,
       "dependencies": {
-        "@octokit/rest": "^19.0.0",
-        "@semantic-release/error": "^3.0.0",
-        "aggregate-error": "^3.0.0",
-        "bottleneck": "^2.18.1",
-        "debug": "^4.0.0",
-        "dir-glob": "^3.0.0",
-        "fs-extra": "^11.0.0",
-        "globby": "^11.0.0",
-        "http-proxy-agent": "^5.0.0",
-        "https-proxy-agent": "^5.0.0",
+        "@octokit/core": "^4.2.1",
+        "@octokit/plugin-paginate-rest": "^7.0.0",
+        "@octokit/plugin-retry": "^5.0.0",
+        "@octokit/plugin-throttling": "^6.0.0",
+        "@semantic-release/error": "^4.0.0",
+        "aggregate-error": "^4.0.1",
+        "debug": "^4.3.4",
+        "dir-glob": "^3.0.1",
+        "globby": "^13.1.4",
+        "http-proxy-agent": "^7.0.0",
+        "https-proxy-agent": "^7.0.0",
         "issue-parser": "^6.0.0",
-        "lodash": "^4.17.4",
+        "lodash-es": "^4.17.21",
         "mime": "^3.0.0",
-        "p-filter": "^2.0.0",
-        "p-retry": "^4.0.0",
-        "url-join": "^4.0.0"
+        "p-filter": "^3.0.0",
+        "url-join": "^5.0.0"
       },
       "engines": {
-        "node": ">=14.17"
+        "node": ">=18"
       },
       "peerDependencies": {
-        "semantic-release": ">=18.0.0-beta.1"
-      }
-    },
-    "node_modules/@semantic-release/github/node_modules/aggregate-error": {
-      "version": "3.1.0",
-      "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz",
-      "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==",
-      "dev": true,
-      "dependencies": {
-        "clean-stack": "^2.0.0",
-        "indent-string": "^4.0.0"
-      },
-      "engines": {
-        "node": ">=8"
-      }
-    },
-    "node_modules/@semantic-release/github/node_modules/clean-stack": {
-      "version": "2.2.0",
-      "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz",
-      "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==",
-      "dev": true,
-      "engines": {
-        "node": ">=6"
+        "semantic-release": ">=20.1.0"
       }
     },
-    "node_modules/@semantic-release/github/node_modules/indent-string": {
+    "node_modules/@semantic-release/github/node_modules/@semantic-release/error": {
       "version": "4.0.0",
-      "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz",
-      "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==",
+      "resolved": "https://registry.npmjs.org/@semantic-release/error/-/error-4.0.0.tgz",
+      "integrity": "sha512-mgdxrHTLOjOddRVYIYDo0fR3/v61GNN1YGkfbrjuIKg/uMgCd+Qzo3UAXJ+woLQQpos4pl5Esuw5A7AoNlzjUQ==",
       "dev": true,
       "engines": {
-        "node": ">=8"
+        "node": ">=18"
       }
     },
     "node_modules/@semantic-release/npm": {
@@ -847,9 +888,9 @@
       }
     },
     "node_modules/@semantic-release/release-notes-generator": {
-      "version": "10.0.3",
-      "resolved": "https://registry.npmjs.org/@semantic-release/release-notes-generator/-/release-notes-generator-10.0.3.tgz",
-      "integrity": "sha512-k4x4VhIKneOWoBGHkx0qZogNjCldLPRiAjnIpMnlUh6PtaWXp/T+C9U7/TaNDDtgDa5HMbHl4WlREdxHio6/3w==",
+      "version": "11.0.1",
+      "resolved": "https://registry.npmjs.org/@semantic-release/release-notes-generator/-/release-notes-generator-11.0.1.tgz",
+      "integrity": "sha512-4deWsiY4Rg80oc9Ms11N20BIDgYkPMys4scNYQpi2Njdrtw5Z55nXKNsUN3kn6Sy/nI9dqqbp5L63TL4luI5Bw==",
       "dev": true,
       "dependencies": {
         "conventional-changelog-angular": "^5.0.0",
@@ -859,102 +900,15 @@
         "debug": "^4.0.0",
         "get-stream": "^6.0.0",
         "import-from": "^4.0.0",
-        "into-stream": "^6.0.0",
-        "lodash": "^4.17.4",
-        "read-pkg-up": "^7.0.0"
+        "into-stream": "^7.0.0",
+        "lodash-es": "^4.17.21",
+        "read-pkg-up": "^9.0.0"
       },
       "engines": {
-        "node": ">=14.17"
+        "node": ">=18"
       },
       "peerDependencies": {
-        "semantic-release": ">=18.0.0-beta.1"
-      }
-    },
-    "node_modules/@semantic-release/release-notes-generator/node_modules/find-up": {
-      "version": "4.1.0",
-      "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz",
-      "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==",
-      "dev": true,
-      "dependencies": {
-        "locate-path": "^5.0.0",
-        "path-exists": "^4.0.0"
-      },
-      "engines": {
-        "node": ">=8"
-      }
-    },
-    "node_modules/@semantic-release/release-notes-generator/node_modules/locate-path": {
-      "version": "5.0.0",
-      "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz",
-      "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==",
-      "dev": true,
-      "dependencies": {
-        "p-locate": "^4.1.0"
-      },
-      "engines": {
-        "node": ">=8"
-      }
-    },
-    "node_modules/@semantic-release/release-notes-generator/node_modules/p-limit": {
-      "version": "2.3.0",
-      "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz",
-      "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==",
-      "dev": true,
-      "dependencies": {
-        "p-try": "^2.0.0"
-      },
-      "engines": {
-        "node": ">=6"
-      },
-      "funding": {
-        "url": "https://github.com/sponsors/sindresorhus"
-      }
-    },
-    "node_modules/@semantic-release/release-notes-generator/node_modules/p-locate": {
-      "version": "4.1.0",
-      "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz",
-      "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==",
-      "dev": true,
-      "dependencies": {
-        "p-limit": "^2.2.0"
-      },
-      "engines": {
-        "node": ">=8"
-      }
-    },
-    "node_modules/@semantic-release/release-notes-generator/node_modules/read-pkg-up": {
-      "version": "7.0.1",
-      "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-7.0.1.tgz",
-      "integrity": "sha512-zK0TB7Xd6JpCLmlLmufqykGE+/TlOePD6qKClNW7hHDKFh/J7/7gCWGR7joEQEW1bKq3a3yUZSObOoWLFQ4ohg==",
-      "dev": true,
-      "dependencies": {
-        "find-up": "^4.1.0",
-        "read-pkg": "^5.2.0",
-        "type-fest": "^0.8.1"
-      },
-      "engines": {
-        "node": ">=8"
-      },
-      "funding": {
-        "url": "https://github.com/sponsors/sindresorhus"
-      }
-    },
-    "node_modules/@semantic-release/release-notes-generator/node_modules/type-fest": {
-      "version": "0.8.1",
-      "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz",
-      "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==",
-      "dev": true,
-      "engines": {
-        "node": ">=8"
-      }
-    },
-    "node_modules/@tootallnate/once": {
-      "version": "2.0.0",
-      "resolved": "https://registry.npmjs.org/@tootallnate/once/-/once-2.0.0.tgz",
-      "integrity": "sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==",
-      "dev": true,
-      "engines": {
-        "node": ">= 10"
+        "semantic-release": ">=20.1.0"
       }
     },
     "node_modules/@types/minimist": {
@@ -969,22 +923,16 @@
       "integrity": "sha512-Gj7cI7z+98M282Tqmp2K5EIsoouUEzbBJhQQzDE3jSIRk6r9gsz0oUokqIUR4u1R3dMHo0pDHM7sNOHyhulypw==",
       "dev": true
     },
-    "node_modules/@types/retry": {
-      "version": "0.12.0",
-      "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz",
-      "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==",
-      "dev": true
-    },
     "node_modules/agent-base": {
-      "version": "6.0.2",
-      "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz",
-      "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==",
+      "version": "7.1.0",
+      "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.0.tgz",
+      "integrity": "sha512-o/zjMZRhJxny7OyEF+Op8X+efiELC7k7yOjMzgfzVqOzXqkBkWI79YoTdOtsuWd5BWhAGAuOY/Xa6xpiaWXiNg==",
       "dev": true,
       "dependencies": {
-        "debug": "4"
+        "debug": "^4.3.4"
       },
       "engines": {
-        "node": ">= 6.0.0"
+        "node": ">= 14"
       }
     },
     "node_modules/aggregate-error": {
@@ -1078,15 +1026,6 @@
       "integrity": "sha512-c5AMf34bKdvPhQ7tBGhqkgKNUzMr4WUs+WDtC2ZUGOUncbxKMTvqxYctiseW3+L4bA8ec+GcZ6/A/FW4m8ukng==",
       "dev": true
     },
-    "node_modules/array-union": {
-      "version": "2.1.0",
-      "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz",
-      "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==",
-      "dev": true,
-      "engines": {
-        "node": ">=8"
-      }
-    },
     "node_modules/arrify": {
       "version": "1.0.1",
       "resolved": "https://registry.npmjs.org/arrify/-/arrify-1.0.1.tgz",
@@ -1276,17 +1215,15 @@
       }
     },
     "node_modules/conventional-changelog-conventionalcommits": {
-      "version": "5.0.0",
-      "resolved": "https://registry.npmjs.org/conventional-changelog-conventionalcommits/-/conventional-changelog-conventionalcommits-5.0.0.tgz",
-      "integrity": "sha512-lCDbA+ZqVFQGUj7h9QBKoIpLhl8iihkO0nCTyRNzuXtcd7ubODpYB04IFy31JloiJgG0Uovu8ot8oxRzn7Nwtw==",
+      "version": "6.1.0",
+      "resolved": "https://registry.npmjs.org/conventional-changelog-conventionalcommits/-/conventional-changelog-conventionalcommits-6.1.0.tgz",
+      "integrity": "sha512-3cS3GEtR78zTfMzk0AizXKKIdN4OvSh7ibNz6/DPbhWWQu7LqE/8+/GqSodV+sywUR2gpJAdP/1JFf4XtN7Zpw==",
       "dev": true,
       "dependencies": {
-        "compare-func": "^2.0.0",
-        "lodash": "^4.17.15",
-        "q": "^1.5.1"
+        "compare-func": "^2.0.0"
       },
       "engines": {
-        "node": ">=10"
+        "node": ">=14"
       }
     },
     "node_modules/conventional-changelog-writer": {
@@ -1528,48 +1465,16 @@
       }
     },
     "node_modules/env-ci": {
-      "version": "8.0.0",
-      "resolved": "https://registry.npmjs.org/env-ci/-/env-ci-8.0.0.tgz",
-      "integrity": "sha512-W+3BqGZozFua9MPeXpmTm5eYEBtGgL76jGu/pwMVp/L8PdECSCEWaIp7d4Mw7kuUrbUldK0oV0bNd6ZZjLiMiA==",
+      "version": "9.0.0",
+      "resolved": "https://registry.npmjs.org/env-ci/-/env-ci-9.0.0.tgz",
+      "integrity": "sha512-Q3cjr1tX9xwigprw4G8M3o7PIOO/1LYji6TyGsbD1WfMmD23etZvhgmPXJqkP788yH4dgSSK7oaIMuaayUJIfg==",
       "dev": true,
       "dependencies": {
-        "execa": "^6.1.0",
+        "execa": "^7.0.0",
         "java-properties": "^1.0.2"
       },
       "engines": {
-        "node": "^16.10 || >=18"
-      }
-    },
-    "node_modules/env-ci/node_modules/execa": {
-      "version": "6.1.0",
-      "resolved": "https://registry.npmjs.org/execa/-/execa-6.1.0.tgz",
-      "integrity": "sha512-QVWlX2e50heYJcCPG0iWtf8r0xjEYfz/OYLGDYH+IyjWezzPNxz63qNFOu0l4YftGWuizFVZHHs8PrLU5p2IDA==",
-      "dev": true,
-      "dependencies": {
-        "cross-spawn": "^7.0.3",
-        "get-stream": "^6.0.1",
-        "human-signals": "^3.0.1",
-        "is-stream": "^3.0.0",
-        "merge-stream": "^2.0.0",
-        "npm-run-path": "^5.1.0",
-        "onetime": "^6.0.0",
-        "signal-exit": "^3.0.7",
-        "strip-final-newline": "^3.0.0"
-      },
-      "engines": {
-        "node": "^12.20.0 || ^14.13.1 || >=16.0.0"
-      },
-      "funding": {
-        "url": "https://github.com/sindresorhus/execa?sponsor=1"
-      }
-    },
-    "node_modules/env-ci/node_modules/human-signals": {
-      "version": "3.0.1",
-      "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-3.0.1.tgz",
-      "integrity": "sha512-rQLskxnM/5OCldHo+wNXbpVgDn5A17CUoKX+7Sokwaknlq7CdSnphy0W39GU8dw59XiCXmFXDg4fRuckQRKewQ==",
-      "dev": true,
-      "engines": {
-        "node": ">=12.20.0"
+        "node": "^16.14 || >=18"
       }
     },
     "node_modules/error-ex": {
@@ -1627,9 +1532,9 @@
       }
     },
     "node_modules/fast-glob": {
-      "version": "3.2.12",
-      "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.12.tgz",
-      "integrity": "sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w==",
+      "version": "3.3.0",
+      "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.0.tgz",
+      "integrity": "sha512-ChDuvbOypPuNjO8yIDf36x7BlZX1smcUMTTcyoIjycexOxd6DFsKsg21qVBzEmr3G7fUKIRy2/psii+CIUt7FA==",
       "dev": true,
       "dependencies": {
         "@nodelib/fs.stat": "^2.0.2",
@@ -1642,18 +1547,6 @@
         "node": ">=8.6.0"
       }
     },
-    "node_modules/fast-glob/node_modules/glob-parent": {
-      "version": "5.1.2",
-      "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
-      "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
-      "dev": true,
-      "dependencies": {
-        "is-glob": "^4.0.1"
-      },
-      "engines": {
-        "node": ">= 6"
-      }
-    },
     "node_modules/fastq": {
       "version": "1.15.0",
       "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.15.0.tgz",
@@ -1802,21 +1695,32 @@
         "xtend": "~4.0.1"
       }
     },
+    "node_modules/glob-parent": {
+      "version": "5.1.2",
+      "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
+      "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
+      "dev": true,
+      "dependencies": {
+        "is-glob": "^4.0.1"
+      },
+      "engines": {
+        "node": ">= 6"
+      }
+    },
     "node_modules/globby": {
-      "version": "11.1.0",
-      "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz",
-      "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==",
+      "version": "13.2.0",
+      "resolved": "https://registry.npmjs.org/globby/-/globby-13.2.0.tgz",
+      "integrity": "sha512-jWsQfayf13NvqKUIL3Ta+CIqMnvlaIDFveWE/dpOZ9+3AMEJozsxDvKA02zync9UuvOM8rOXzsD5GqKP4OnWPQ==",
       "dev": true,
       "dependencies": {
-        "array-union": "^2.1.0",
         "dir-glob": "^3.0.1",
-        "fast-glob": "^3.2.9",
+        "fast-glob": "^3.2.11",
         "ignore": "^5.2.0",
         "merge2": "^1.4.1",
-        "slash": "^3.0.0"
+        "slash": "^4.0.0"
       },
       "engines": {
-        "node": ">=10"
+        "node": "^12.20.0 || ^14.13.1 || >=16.0.0"
       },
       "funding": {
         "url": "https://github.com/sponsors/sindresorhus"
@@ -1904,30 +1808,29 @@
       }
     },
     "node_modules/http-proxy-agent": {
-      "version": "5.0.0",
-      "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz",
-      "integrity": "sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==",
+      "version": "7.0.0",
+      "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-7.0.0.tgz",
+      "integrity": "sha512-+ZT+iBxVUQ1asugqnD6oWoRiS25AkjNfG085dKJGtGxkdwLQrMKU5wJr2bOOFAXzKcTuqq+7fZlTMgG3SRfIYQ==",
       "dev": true,
       "dependencies": {
-        "@tootallnate/once": "2",
-        "agent-base": "6",
-        "debug": "4"
+        "agent-base": "^7.1.0",
+        "debug": "^4.3.4"
       },
       "engines": {
-        "node": ">= 6"
+        "node": ">= 14"
       }
     },
     "node_modules/https-proxy-agent": {
-      "version": "5.0.1",
-      "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz",
-      "integrity": "sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==",
+      "version": "7.0.0",
+      "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.0.tgz",
+      "integrity": "sha512-0euwPCRyAPSgGdzD1IVN9nJYHtBhJwb6XPfbpQcYbPCwrBidX6GzxmchnaF4sfF/jPb74Ojx5g4yTg3sixlyPw==",
       "dev": true,
       "dependencies": {
-        "agent-base": "6",
+        "agent-base": "^7.0.2",
         "debug": "4"
       },
       "engines": {
-        "node": ">= 6"
+        "node": ">= 14"
       }
     },
     "node_modules/human-signals": {
@@ -2001,16 +1904,16 @@
       "dev": true
     },
     "node_modules/into-stream": {
-      "version": "6.0.0",
-      "resolved": "https://registry.npmjs.org/into-stream/-/into-stream-6.0.0.tgz",
-      "integrity": "sha512-XHbaOAvP+uFKUFsOgoNPRjLkwB+I22JFPFe5OjTkQ0nwgj6+pSjb4NmB6VMxaPshLiOf+zcpOCBQuLwC1KHhZA==",
+      "version": "7.0.0",
+      "resolved": "https://registry.npmjs.org/into-stream/-/into-stream-7.0.0.tgz",
+      "integrity": "sha512-2dYz766i9HprMBasCMvHMuazJ7u4WzhJwo5kb3iPSiW/iRYV6uPari3zHoqZlnuaR7V1bEiNMxikhp37rdBXbw==",
       "dev": true,
       "dependencies": {
         "from2": "^2.3.0",
         "p-is-promise": "^3.0.0"
       },
       "engines": {
-        "node": ">=10"
+        "node": ">=12"
       },
       "funding": {
         "url": "https://github.com/sponsors/sindresorhus"
@@ -2357,6 +2260,7 @@
       "resolved": "https://registry.npmjs.org/marked/-/marked-4.2.12.tgz",
       "integrity": "sha512-yr8hSKa3Fv4D3jdZmtMMPghgVt6TWbk86WQaWhDloQjRSQhMMYCAro7jP7VDJrjjdV8pxVxMssXS8B8Y5DZ5aw==",
       "dev": true,
+      "peer": true,
       "bin": {
        "marked": "bin/marked.js"
       },
@@ -2632,9 +2536,9 @@
       }
     },
     "node_modules/node-fetch": {
-      "version": "2.6.9",
-      "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.9.tgz",
-      "integrity": "sha512-DJm/CJkZkRjKKj4Zi4BsKVZh3ValV5IR5s7LVZnW+6YMh0W1BfNA8XSs6DLMGYlId5F3KnA70uu2qepcR08Qqg==",
+      "version": "2.6.12",
+      "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.12.tgz",
+      "integrity": "sha512-C/fGU2E8ToujUivIO0H+tpQ6HWo4eEmchoPIoXtxCrVghxdKq+QOHqEZW7tuP3KlV3bC8FRMO5nMCC7Zm1VP6g==",
       "dev": true,
       "dependencies": {
         "whatwg-url": "^5.0.0"
@@ -5727,15 +5631,18 @@
       }
     },
     "node_modules/p-filter": {
-      "version": "2.1.0",
-      "resolved": "https://registry.npmjs.org/p-filter/-/p-filter-2.1.0.tgz",
-      "integrity": "sha512-ZBxxZ5sL2HghephhpGAQdoskxplTwr7ICaehZwLIlfL6acuVgZPm8yBNuRAFBGEqtD/hmUeq9eqLg2ys9Xr/yw==",
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/p-filter/-/p-filter-3.0.0.tgz",
+      "integrity": "sha512-QtoWLjXAW++uTX67HZQz1dbTpqBfiidsB6VtQUC9iR85S120+s0T5sO6s+B5MLzFcZkrEd/DGMmCjR+f2Qpxwg==",
       "dev": true,
       "dependencies": {
-        "p-map": "^2.0.0"
+        "p-map": "^5.1.0"
       },
       "engines": {
-        "node": ">=8"
+        "node": "^12.20.0 || ^14.13.1 || >=16.0.0"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/sindresorhus"
       }
     },
     "node_modules/p-is-promise": {
@@ -5748,12 +5655,18 @@
       }
     },
     "node_modules/p-map": {
-      "version": "2.1.0",
-      "resolved": "https://registry.npmjs.org/p-map/-/p-map-2.1.0.tgz",
-      "integrity": "sha512-y3b8Kpd8OAN444hxfBbFfj1FY/RjtTd8tzYwhUqNYXx0fXx2iX4maP4Qr6qhIKbQXI02wTLAda4fYUbDagTUFw==",
+      "version": "5.5.0",
+      "resolved": "https://registry.npmjs.org/p-map/-/p-map-5.5.0.tgz",
+      "integrity": "sha512-VFqfGDHlx87K66yZrNdI4YGtD70IRyd+zSvgks6mzHPRNkoKy+9EKP4SFC77/vTTQYmRmti7dvqC+m5jBrBAcg==",
       "dev": true,
+      "dependencies": {
+        "aggregate-error": "^4.0.0"
+      },
       "engines": {
-        "node": ">=6"
+        "node": ">=12"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/sindresorhus"
       }
     },
     "node_modules/p-reduce": {
@@ -5768,19 +5681,6 @@
         "url": "https://github.com/sponsors/sindresorhus"
       }
     },
-    "node_modules/p-retry": {
-      "version": "4.6.2",
-      "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.2.tgz",
-      "integrity": "sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==",
-      "dev": true,
-      "dependencies": {
-        "@types/retry": "0.12.0",
-        "retry": "^0.13.1"
-      },
-      "engines": {
-        "node": ">=8"
-      }
-    },
     "node_modules/p-try": {
       "version": "2.2.0",
       "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz",
@@ -6303,15 +6203,6 @@
         "node": ">=4"
       }
     },
-    "node_modules/retry": {
-      "version": "0.13.1",
-      "resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz",
-      "integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==",
-      "dev": true,
-      "engines": {
-        "node": ">= 4"
-      }
-    },
     "node_modules/reusify": {
       "version": "1.0.4",
       "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz",
@@ -6352,20 +6243,20 @@
       "dev": true
     },
     "node_modules/semantic-release": {
-      "version": "21.0.0",
-      "resolved": "https://registry.npmjs.org/semantic-release/-/semantic-release-21.0.0.tgz",
-      "integrity": "sha512-zks0jVk2Hbyhn014vshcwQ6e6gM9jDPr8SdujqfAzPJBvvvSXa8GHz/x+W0VaW2aBNawWFAlx6N45dp1H1XCCw==",
+      "version": "21.0.6",
+      "resolved": "https://registry.npmjs.org/semantic-release/-/semantic-release-21.0.6.tgz",
+      "integrity": "sha512-NDyosObAwUNzPpdf+mpL49Xy+5iYHjdWM34LBNdbdYv9vBLbw+eCCDihxcqPh+f9m4ZzlBrYCkHUaZv2vPGW9A==",
       "dev": true,
       "dependencies": {
-        "@semantic-release/commit-analyzer": "^9.0.2",
-        "@semantic-release/error": "^3.0.0",
-        "@semantic-release/github": "^8.0.0",
+        "@semantic-release/commit-analyzer": "^10.0.0",
+        "@semantic-release/error": "^4.0.0",
+        "@semantic-release/github": "^9.0.0",
         "@semantic-release/npm": "^10.0.2",
-        "@semantic-release/release-notes-generator": "^10.0.0",
+        "@semantic-release/release-notes-generator": "^11.0.0",
         "aggregate-error": "^4.0.1",
         "cosmiconfig": "^8.0.0",
         "debug": "^4.0.0",
-        "env-ci": "^8.0.0",
+        "env-ci": "^9.0.0",
         "execa": "^7.0.0",
         "figures": "^5.0.0",
         "find-versions": "^5.1.0",
@@ -6374,7 +6265,7 @@
         "hook-std": "^3.0.0",
         "hosted-git-info": "^6.0.0",
         "lodash-es": "^4.17.21",
-        "marked": "^4.1.0",
+        "marked": "^5.0.0",
         "marked-terminal": "^5.1.1",
         "micromatch": "^4.0.2",
         "p-each-series": "^3.0.0",
@@ -6387,10 +6278,31 @@
         "yargs": "^17.5.1"
       },
       "bin": {
-        "semantic-release": "bin/semantic-release.js"
+        "semantic-release": "bin/semantic-release.js"
+      },
+      "engines": {
+        "node": ">=18"
+      }
+    },
+    "node_modules/semantic-release/node_modules/@semantic-release/error": {
+      "version": "4.0.0",
+      "resolved": "https://registry.npmjs.org/@semantic-release/error/-/error-4.0.0.tgz",
+      "integrity": "sha512-mgdxrHTLOjOddRVYIYDo0fR3/v61GNN1YGkfbrjuIKg/uMgCd+Qzo3UAXJ+woLQQpos4pl5Esuw5A7AoNlzjUQ==",
+      "dev": true,
+      "engines": {
+        "node": ">=18"
+      }
+    },
+    "node_modules/semantic-release/node_modules/marked": {
+      "version": "5.1.0",
+      "resolved": "https://registry.npmjs.org/marked/-/marked-5.1.0.tgz",
+      "integrity": "sha512-z3/nBe7aTI8JDszlYLk7dDVNpngjw0o1ZJtrA9kIfkkHcIF+xH7mO23aISl4WxP83elU+MFROgahqdpd05lMEQ==",
+      "dev": true,
+      "bin": {
+        "marked": "bin/marked.js"
       },
       "engines": {
-        "node": ">=18"
+        "node": ">= 18"
       }
     },
     "node_modules/semantic-release/node_modules/resolve-from": {
@@ -6581,12 +6493,15 @@
       }
     },
     "node_modules/slash": {
-      "version": "3.0.0",
-      "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz",
-      "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==",
+      "version": "4.0.0",
+      "resolved": "https://registry.npmjs.org/slash/-/slash-4.0.0.tgz",
+      "integrity": "sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==",
       "dev": true,
       "engines": {
-        "node": ">=8"
+        "node": ">=12"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/sindresorhus"
       }
     },
     "node_modules/source-map": {
@@ -6958,10 +6873,13 @@
       }
     },
     "node_modules/url-join": {
-      "version": "4.0.1",
-      "resolved": "https://registry.npmjs.org/url-join/-/url-join-4.0.1.tgz",
-      "integrity": "sha512-jk1+QP6ZJqyOiuEI9AEWQfju/nB2Pw466kbA0LEZljHwKeMgd9WrAEgEGxjPDD2+TNbbb37rTyhEfrCXfuKXnA==",
-      "dev": true
+      "version": "5.0.0",
+      "resolved": "https://registry.npmjs.org/url-join/-/url-join-5.0.0.tgz",
+      "integrity": "sha512-n2huDr9h9yzd6exQVnH/jU5mr+Pfx08LRXXZhkLLetAMESRj+anQsTAh940iMrIetKAmry9coFuZQ2jY8/p3WA==",
+      "dev": true,
+      "engines": {
+        "node": "^12.20.0 || ^14.13.1 || >=16.0.0"
+      }
     },
     "node_modules/util-deprecate": {
       "version": "1.0.2",
@@ -7226,18 +7144,15 @@
       }
     },
     "@octokit/auth-token": {
-      "version": "3.0.3",
-      "resolved": "https://registry.npmjs.org/@octokit/auth-token/-/auth-token-3.0.3.tgz",
-      "integrity": "sha512-/aFM2M4HVDBT/jjDBa84sJniv1t9Gm/rLkalaz9htOm+L+8JMj1k9w0CkUdcxNyNxZPlTxKPVko+m1VlM58ZVA==",
-      "dev": true,
-      "requires": {
-        "@octokit/types": "^9.0.0"
-      }
+      "version": "3.0.4",
+      "resolved": "https://registry.npmjs.org/@octokit/auth-token/-/auth-token-3.0.4.tgz",
+      "integrity": "sha512-TWFX7cZF2LXoCvdmJWY7XVPi74aSY0+FfBZNSXEXFkMpjcqsQwDSYVv5FhRFaI0V1ECnwbz4j59T/G+rXNWaIQ==",
+      "dev": true
    },
    "@octokit/core": {
-      "version": "4.2.0",
-      "resolved": "https://registry.npmjs.org/@octokit/core/-/core-4.2.0.tgz",
-      "integrity": "sha512-AgvDRUg3COpR82P7PBdGZF/NNqGmtMq2NiPqeSsDIeCfYFOZ9gddqWNQHnFdEUf+YwOj4aZYmJnlPp7OXmDIDg==",
+      "version": "4.2.4",
+      "resolved": "https://registry.npmjs.org/@octokit/core/-/core-4.2.4.tgz",
+      "integrity": "sha512-rYKilwgzQ7/imScn3M9/pFfUf4I1AZEH3KhyJmtPdE2zfaXAn2mFfUy4FbKewzc2We5y/LlKLj36fWJLKC2SIQ==",
       "dev": true,
       "requires": {
         "@octokit/auth-token": "^3.0.0",
@@ -7250,9 +7165,9 @@
       }
     },
     "@octokit/endpoint": {
-      "version": "7.0.5",
-      "resolved": "https://registry.npmjs.org/@octokit/endpoint/-/endpoint-7.0.5.tgz",
-      "integrity": "sha512-LG4o4HMY1Xoaec87IqQ41TQ+glvIeTKqfjkCEmt5AIwDZJwQeVZFIEYXrYY6yLwK+pAScb9Gj4q+Nz2qSw1roA==",
+      "version": "7.0.6",
+      "resolved": "https://registry.npmjs.org/@octokit/endpoint/-/endpoint-7.0.6.tgz",
+      "integrity": "sha512-5L4fseVRUsDFGR00tMWD/Trdeeihn999rTMGRMC1G/Ldi1uWlWJzI98H4Iak5DB/RVvQuyMYKqSK/R6mbSOQyg==",
       "dev": true,
       "requires": {
         "@octokit/types": "^9.0.0",
@@ -7261,9 +7176,9 @@
       }
     },
     "@octokit/graphql": {
-      "version": "5.0.5",
-      "resolved": "https://registry.npmjs.org/@octokit/graphql/-/graphql-5.0.5.tgz",
-      "integrity": "sha512-Qwfvh3xdqKtIznjX9lz2D458r7dJPP8l6r4GQkIdWQouZwHQK0mVT88uwiU2bdTU2OtT1uOlKpRciUWldpG0yQ==",
+      "version": "5.0.6",
+      "resolved": "https://registry.npmjs.org/@octokit/graphql/-/graphql-5.0.6.tgz",
+      "integrity": "sha512-Fxyxdy/JH0MnIB5h+UQ3yCoh1FG4kWXfFKkpWqjZHw/p+Kc8Y44Hu/kCgNBT6nU1shNumEchmW/sUO1JuQnPcw==",
       "dev": true,
       "requires": {
         "@octokit/request": "^6.0.0",
@@ -7272,41 +7187,68 @@
       }
     },
     "@octokit/openapi-types": {
-      "version": "16.0.0",
-      "resolved": "https://registry.npmjs.org/@octokit/openapi-types/-/openapi-types-16.0.0.tgz",
-      "integrity": "sha512-JbFWOqTJVLHZSUUoF4FzAZKYtqdxWu9Z5m2QQnOyEa04fOFljvyh7D3GYKbfuaSWisqehImiVIMG4eyJeP5VEA==",
+      "version": "18.0.0",
+      "resolved": "https://registry.npmjs.org/@octokit/openapi-types/-/openapi-types-18.0.0.tgz",
+      "integrity": "sha512-V8GImKs3TeQRxRtXFpG2wl19V7444NIOTDF24AWuIbmNaNYOQMWRbjcGDXV5B+0n887fgDcuMNOmlul+k+oJtw==",
       "dev": true
     },
     "@octokit/plugin-paginate-rest": {
-      "version": "6.0.0",
-      "resolved": "https://registry.npmjs.org/@octokit/plugin-paginate-rest/-/plugin-paginate-rest-6.0.0.tgz",
-      "integrity": "sha512-Sq5VU1PfT6/JyuXPyt04KZNVsFOSBaYOAq2QRZUwzVlI10KFvcbUo8lR258AAQL1Et60b0WuVik+zOWKLuDZxw==",
+      "version": "7.1.2",
+      "resolved": "https://registry.npmjs.org/@octokit/plugin-paginate-rest/-/plugin-paginate-rest-7.1.2.tgz",
+      "integrity": "sha512-Jx8KuKqEAVRsK6fMzZKv3h6UH9/NRDHsDRtUAROqqmZlCptM///Uef7A1ViZ/cbDplekz7VbDWdFLAZ/mpuDww==",
       "dev": true,
       "requires": {
-        "@octokit/types": "^9.0.0"
+        "@octokit/tsconfig": "^2.0.0",
+        "@octokit/types": "^9.3.2"
       }
     },
-    "@octokit/plugin-request-log": {
-      "version": "1.0.4",
-      "resolved": "https://registry.npmjs.org/@octokit/plugin-request-log/-/plugin-request-log-1.0.4.tgz",
-      "integrity": "sha512-mLUsMkgP7K/cnFEw07kWqXGF5LKrOkD+lhCrKvPHXWDywAwuDUeDwWBpc69XK3pNX0uKiVt8g5z96PJ6z9xCFA==",
+    "@octokit/plugin-retry": {
+      "version": "5.0.4",
+      "resolved": "https://registry.npmjs.org/@octokit/plugin-retry/-/plugin-retry-5.0.4.tgz",
+      "integrity": "sha512-hw00fDIhOgijy4aSxS6weWF5uqZVeoiC/AptLLyjL8KFCJRGRaXfcfgj76h/Z3cSLTjRsEIQnNCTig8INttL/g==",
       "dev": true,
-      "requires": {}
+      "requires": {
+        "@octokit/request-error": "^4.0.1",
+        "@octokit/types": "^10.0.0",
+        "bottleneck": "^2.15.3"
+      },
+      "dependencies": {
+        "@octokit/request-error": {
+          "version": "4.0.2",
+          "resolved": "https://registry.npmjs.org/@octokit/request-error/-/request-error-4.0.2.tgz",
+          "integrity": "sha512-uqwUEmZw3x4I9DGYq9fODVAAvcLsPQv97NRycP6syEFu5916M189VnNBW2zANNwqg3OiligNcAey7P0SET843w==",
+          "dev": true,
+          "requires": {
+            "@octokit/types": "^10.0.0",
+            "deprecation": "^2.0.0",
+            "once": "^1.4.0"
+          }
+        },
+        "@octokit/types": {
+          "version": "10.0.0",
+          "resolved": "https://registry.npmjs.org/@octokit/types/-/types-10.0.0.tgz",
+          "integrity": "sha512-Vm8IddVmhCgU1fxC1eyinpwqzXPEYu0NrYzD3YZjlGjyftdLBTeqNblRC0jmJmgxbJIsQlyogVeGnrNaaMVzIg==",
+          "dev": true,
+          "requires": {
+            "@octokit/openapi-types": "^18.0.0"
+          }
+        }
+      }
     },
-    "@octokit/plugin-rest-endpoint-methods": {
-      "version": "7.0.1",
-      "resolved": "https://registry.npmjs.org/@octokit/plugin-rest-endpoint-methods/-/plugin-rest-endpoint-methods-7.0.1.tgz",
-      "integrity": "sha512-pnCaLwZBudK5xCdrR823xHGNgqOzRnJ/mpC/76YPpNP7DybdsJtP7mdOwh+wYZxK5jqeQuhu59ogMI4NRlBUvA==",
+    "@octokit/plugin-throttling": {
+      "version": "6.1.0",
+      "resolved": "https://registry.npmjs.org/@octokit/plugin-throttling/-/plugin-throttling-6.1.0.tgz",
+      "integrity": "sha512-JqMbTiPC0sUSTsLQsdq3JVx1mx8UtTo5mwR80YqPXE93+XhevvSyOR1rO2Z+NbO/r0TK4hqFJSSi/9oIZBxZTg==",
       "dev": true,
       "requires": {
         "@octokit/types": "^9.0.0",
-        "deprecation": "^2.3.1"
+        "bottleneck": "^2.15.3"
       }
     },
     "@octokit/request": {
-      "version": "6.2.3",
-      "resolved": "https://registry.npmjs.org/@octokit/request/-/request-6.2.3.tgz",
-      "integrity": "sha512-TNAodj5yNzrrZ/VxP+H5HiYaZep0H3GU0O7PaF+fhDrt8FPrnkei9Aal/txsN/1P7V3CPiThG0tIvpPDYUsyAA==",
+      "version": "6.2.8",
+      "resolved": "https://registry.npmjs.org/@octokit/request/-/request-6.2.8.tgz",
+      "integrity": "sha512-ow4+pkVQ+6XVVsekSYBzJC0VTVvh/FCTUUgTsboGq+DTeWdyIFV8WSCdo0RIxk6wSkBTHqIK1mYuY7nOBXOchw==",
       "dev": true,
       "requires": {
         "@octokit/endpoint": "^7.0.0",
@@ -7328,25 +7270,19 @@
         "once": "^1.4.0"
       }
     },
-    "@octokit/rest": {
-      "version": "19.0.7",
-      "resolved": "https://registry.npmjs.org/@octokit/rest/-/rest-19.0.7.tgz",
-      "integrity": "sha512-HRtSfjrWmWVNp2uAkEpQnuGMJsu/+dBr47dRc5QVgsCbnIc1+GFEaoKBWkYG+zjrsHpSqcAElMio+n10c0b5JA==",
-      "dev": true,
-      "requires": {
-        "@octokit/core": "^4.1.0",
-        "@octokit/plugin-paginate-rest": "^6.0.0",
-        "@octokit/plugin-request-log": "^1.0.4",
-        "@octokit/plugin-rest-endpoint-methods": "^7.0.0"
-      }
+    "@octokit/tsconfig": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/@octokit/tsconfig/-/tsconfig-2.0.0.tgz",
+      "integrity": "sha512-tWnrai3quGt8+gRN2edzo9fmraWekeryXPeXDomMw2oFSpu/lH3VSWGn/q4V+rwjTRMeeXk/ci623/01Zet4VQ==",
+      "dev": true
     },
     "@octokit/types": {
-      "version": "9.0.0",
-      "resolved": "https://registry.npmjs.org/@octokit/types/-/types-9.0.0.tgz",
-      "integrity": "sha512-LUewfj94xCMH2rbD5YJ+6AQ4AVjFYTgpp6rboWM5T7N3IsIF65SBEOVcYMGAEzO/kKNiNaW4LoWtoThOhH06gw==",
+      "version": "9.3.2",
+      "resolved": "https://registry.npmjs.org/@octokit/types/-/types-9.3.2.tgz",
+      "integrity": "sha512-D4iHGTdAnEEVsB8fl95m1hiz7D5YiRdQ9b/OEb3BYRVwbLsGHcRVPz+u+BgRLNk0Q0/4iZCBqDN96j2XNxfXrA==",
       "dev": true,
       "requires": {
-        "@octokit/openapi-types": "^16.0.0"
+        "@octokit/openapi-types": "^18.0.0"
       }
     },
     "@pnpm/config.env-replace": {
@@ -7412,18 +7348,51 @@
       }
     },
     "@semantic-release/commit-analyzer": {
-      "version": "9.0.2",
-      "resolved": "https://registry.npmjs.org/@semantic-release/commit-analyzer/-/commit-analyzer-9.0.2.tgz",
-      "integrity": "sha512-E+dr6L+xIHZkX4zNMe6Rnwg4YQrWNXK+rNsvwOPpdFppvZO1olE2fIgWhv89TkQErygevbjsZFSIxp+u6w2e5g==",
+      "version": "10.0.1",
+      "resolved": "https://registry.npmjs.org/@semantic-release/commit-analyzer/-/commit-analyzer-10.0.1.tgz",
+      "integrity": "sha512-9ejHzTAijYs9z246sY/dKBatmOPcd0GQ7lH4MgLCkv1q4GCiDZRkjHJkaQZXZVaK7mJybS+sH3Ng6G8i3pYMGQ==",
       "dev": true,
       "requires": {
-        "conventional-changelog-angular": "^5.0.0",
-        "conventional-commits-filter": "^2.0.0",
-        "conventional-commits-parser": "^3.2.3",
+        "conventional-changelog-angular": "^6.0.0",
+        "conventional-commits-filter": "^3.0.0",
+        "conventional-commits-parser": "^4.0.0",
         "debug": "^4.0.0",
         "import-from": "^4.0.0",
-        "lodash": "^4.17.4",
+        "lodash-es": "^4.17.21",
         "micromatch": "^4.0.2"
+      },
+      "dependencies": {
+        "conventional-changelog-angular": {
+          "version": "6.0.0",
+          "resolved": "https://registry.npmjs.org/conventional-changelog-angular/-/conventional-changelog-angular-6.0.0.tgz",
+          "integrity": "sha512-6qLgrBF4gueoC7AFVHu51nHL9pF9FRjXrH+ceVf7WmAfH3gs+gEYOkvxhjMPjZu57I4AGUGoNTY8V7Hrgf1uqg==",
+          "dev": true,
+          "requires": {
+            "compare-func": "^2.0.0"
+          }
+        },
+        "conventional-commits-filter": {
+          "version": "3.0.0",
+          "resolved": "https://registry.npmjs.org/conventional-commits-filter/-/conventional-commits-filter-3.0.0.tgz",
+          "integrity": "sha512-1ymej8b5LouPx9Ox0Dw/qAO2dVdfpRFq28e5Y0jJEU8ZrLdy0vOSkkIInwmxErFGhg6SALro60ZrwYFVTUDo4Q==",
+          "dev": true,
+          "requires": {
+            "lodash.ismatch": "^4.4.0",
+            "modify-values": "^1.0.1"
+          }
+        },
+        "conventional-commits-parser": {
+          "version": "4.0.0",
+          "resolved": "https://registry.npmjs.org/conventional-commits-parser/-/conventional-commits-parser-4.0.0.tgz",
+          "integrity": "sha512-WRv5j1FsVM5FISJkoYMR6tPk07fkKT0UodruX4je86V4owk451yjXAKzKAPOs9l7y59E2viHUS9eQ+dfUA9NSg==",
+          "dev": true,
+          "requires": {
+            "is-text-path": "^1.0.1",
+            "JSONStream": "^1.3.5",
+            "meow": "^8.1.2",
+            "split2": "^3.2.2"
+          }
+        }
       }
     },
     "@semantic-release/error": {
@@ -7635,49 +7604,33 @@
       }
     },
     "@semantic-release/github": {
-      "version": "8.0.7",
-      "resolved": "https://registry.npmjs.org/@semantic-release/github/-/github-8.0.7.tgz",
-      "integrity": "sha512-VtgicRIKGvmTHwm//iqTh/5NGQwsncOMR5vQK9pMT92Aem7dv37JFKKRuulUsAnUOIlO4G8wH3gPiBAA0iW0ww==",
+      "version": "9.0.3",
+      "resolved": "https://registry.npmjs.org/@semantic-release/github/-/github-9.0.3.tgz",
+      "integrity": "sha512-X6gq4USKVlCxPwIIyXb99jU7gwVWlnsKOevs+OyABRdoqc+OIRITbFmrrYU3eE1vGMGk+Qu/GAoLUQQQwC3YOA==",
       "dev": true,
       "requires": {
-        "@octokit/rest": "^19.0.0",
-        "@semantic-release/error": "^3.0.0",
-        "aggregate-error": "^3.0.0",
-        "bottleneck": "^2.18.1",
-        "debug": "^4.0.0",
-        "dir-glob": "^3.0.0",
-        "fs-extra": "^11.0.0",
-        "globby": "^11.0.0",
-        "http-proxy-agent": "^5.0.0",
-        "https-proxy-agent": "^5.0.0",
+        "@octokit/core": "^4.2.1",
+        "@octokit/plugin-paginate-rest": "^7.0.0",
+        "@octokit/plugin-retry": "^5.0.0",
+        "@octokit/plugin-throttling": "^6.0.0",
+        "@semantic-release/error": "^4.0.0",
+        "aggregate-error": "^4.0.1",
+        "debug": "^4.3.4",
+        "dir-glob": "^3.0.1",
+        "globby": "^13.1.4",
+        "http-proxy-agent": "^7.0.0",
+        "https-proxy-agent": "^7.0.0",
         "issue-parser": "^6.0.0",
-        "lodash": "^4.17.4",
+        "lodash-es": "^4.17.21",
         "mime": "^3.0.0",
-        "p-filter": "^2.0.0",
-        "p-retry": "^4.0.0",
-        "url-join": "^4.0.0"
+        "p-filter": "^3.0.0",
+        "url-join": "^5.0.0"
       },
       "dependencies": {
-        "aggregate-error": {
-          "version": "3.1.0",
-          "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz",
-          "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==",
-          "dev": true,
-          "requires": {
-            "clean-stack": "^2.0.0",
-            "indent-string": "^4.0.0"
-          }
-        },
-        "clean-stack": {
-          "version": "2.2.0",
-          "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz",
-          "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==",
-          "dev": true
-        },
-        "indent-string": {
+        "@semantic-release/error": {
           "version": "4.0.0",
-          "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz",
-          "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==",
+          "resolved": "https://registry.npmjs.org/@semantic-release/error/-/error-4.0.0.tgz",
+          "integrity": "sha512-mgdxrHTLOjOddRVYIYDo0fR3/v61GNN1YGkfbrjuIKg/uMgCd+Qzo3UAXJ+woLQQpos4pl5Esuw5A7AoNlzjUQ==",
           "dev": true
         }
       }
@@ -7718,9 +7671,9 @@
       }
     },
     "@semantic-release/release-notes-generator": {
-      "version": "10.0.3",
-      "resolved": "https://registry.npmjs.org/@semantic-release/release-notes-generator/-/release-notes-generator-10.0.3.tgz",
-      "integrity": "sha512-k4x4VhIKneOWoBGHkx0qZogNjCldLPRiAjnIpMnlUh6PtaWXp/T+C9U7/TaNDDtgDa5HMbHl4WlREdxHio6/3w==",
+      "version": "11.0.1",
+      "resolved": "https://registry.npmjs.org/@semantic-release/release-notes-generator/-/release-notes-generator-11.0.1.tgz",
+      "integrity": "sha512-4deWsiY4Rg80oc9Ms11N20BIDgYkPMys4scNYQpi2Njdrtw5Z55nXKNsUN3kn6Sy/nI9dqqbp5L63TL4luI5Bw==",
       "dev": true,
       "requires": {
         "conventional-changelog-angular": "^5.0.0",
@@ -7730,73 +7683,11 @@
         "debug": "^4.0.0",
         "get-stream": "^6.0.0",
         "import-from": "^4.0.0",
-        "into-stream": "^6.0.0",
-        "lodash": "^4.17.4",
-        "read-pkg-up": "^7.0.0"
-      },
-      "dependencies": {
-        "find-up": {
-          "version": "4.1.0",
-          "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz",
-          "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==",
-          "dev": true,
-          "requires": {
-            "locate-path": "^5.0.0",
-            "path-exists": "^4.0.0"
-          }
-        },
-        "locate-path": {
-          "version": "5.0.0",
-          "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz",
-          "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==",
-          "dev": true,
-          "requires": {
-            "p-locate": "^4.1.0"
-          }
-        },
-        "p-limit": {
-          "version": "2.3.0",
-          "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz",
-          "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==",
-          "dev": true,
-          "requires": {
-            "p-try": "^2.0.0"
-          }
-        },
-        "p-locate": {
-          "version": "4.1.0",
-          "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz",
-          "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==",
-          "dev": true,
-          "requires": {
-            "p-limit": "^2.2.0"
-          }
-        },
-        "read-pkg-up": {
-          "version": "7.0.1",
-          "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-7.0.1.tgz",
-          "integrity": "sha512-zK0TB7Xd6JpCLmlLmufqykGE+/TlOePD6qKClNW7hHDKFh/J7/7gCWGR7joEQEW1bKq3a3yUZSObOoWLFQ4ohg==",
-          "dev": true,
-          "requires": {
-            "find-up": "^4.1.0",
-            "read-pkg": "^5.2.0",
-            "type-fest": "^0.8.1"
-          }
-        },
-        "type-fest": {
-          "version": "0.8.1",
-          "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz",
-          "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==",
-          "dev": true
-        }
+        "into-stream": "^7.0.0",
+        "lodash-es": "^4.17.21",
+        "read-pkg-up": "^9.0.0"
       }
     },
-    "@tootallnate/once": {
-      "version": "2.0.0",
-      "resolved": "https://registry.npmjs.org/@tootallnate/once/-/once-2.0.0.tgz",
-      "integrity": "sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==",
-      "dev": true
-    },
     "@types/minimist": {
       "version": "1.2.2",
       "resolved": "https://registry.npmjs.org/@types/minimist/-/minimist-1.2.2.tgz",
@@ -7809,19 +7700,13 @@
       "integrity": "sha512-Gj7cI7z+98M282Tqmp2K5EIsoouUEzbBJhQQzDE3jSIRk6r9gsz0oUokqIUR4u1R3dMHo0pDHM7sNOHyhulypw==",
       "dev": true
     },
-    "@types/retry": {
-      "version": "0.12.0",
-      "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz",
-      "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==",
-      "dev": true
-    },
     "agent-base": {
-      "version": "6.0.2",
-      "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz",
-      "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==",
+      "version": "7.1.0",
+      "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.0.tgz",
+      "integrity": "sha512-o/zjMZRhJxny7OyEF+Op8X+efiELC7k7yOjMzgfzVqOzXqkBkWI79YoTdOtsuWd5BWhAGAuOY/Xa6xpiaWXiNg==",
       "dev": true,
       "requires": {
-        "debug": "4"
+        "debug": "^4.3.4"
       }
     },
     "aggregate-error": {
@@ -7890,12 +7775,6 @@
       "integrity": "sha512-c5AMf34bKdvPhQ7tBGhqkgKNUzMr4WUs+WDtC2ZUGOUncbxKMTvqxYctiseW3+L4bA8ec+GcZ6/A/FW4m8ukng==",
       "dev": true
     },
-    "array-union": {
-      "version": "2.1.0",
-      "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz",
-      "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==",
-      "dev": true
-    },
     "arrify": {
       "version": "1.0.1",
       "resolved": "https://registry.npmjs.org/arrify/-/arrify-1.0.1.tgz",
@@ -8040,14 +7919,12 @@
       }
     },
     "conventional-changelog-conventionalcommits": {
-      "version": "5.0.0",
-      "resolved": "https://registry.npmjs.org/conventional-changelog-conventionalcommits/-/conventional-changelog-conventionalcommits-5.0.0.tgz",
-      "integrity": "sha512-lCDbA+ZqVFQGUj7h9QBKoIpLhl8iihkO0nCTyRNzuXtcd7ubODpYB04IFy31JloiJgG0Uovu8ot8oxRzn7Nwtw==",
+      "version": "6.1.0",
+      "resolved": "https://registry.npmjs.org/conventional-changelog-conventionalcommits/-/conventional-changelog-conventionalcommits-6.1.0.tgz",
+      "integrity": "sha512-3cS3GEtR78zTfMzk0AizXKKIdN4OvSh7ibNz6/DPbhWWQu7LqE/8+/GqSodV+sywUR2gpJAdP/1JFf4XtN7Zpw==",
       "dev": true,
       "requires": {
-        "compare-func": "^2.0.0",
-        "lodash": "^4.17.15",
-        "q": "^1.5.1"
+        "compare-func": "^2.0.0"
       }
     },
     "conventional-changelog-writer": {
@@ -8224,38 +8101,13 @@
       }
     },
     "env-ci": {
-      "version": "8.0.0",
-      "resolved": "https://registry.npmjs.org/env-ci/-/env-ci-8.0.0.tgz",
-      "integrity": "sha512-W+3BqGZozFua9MPeXpmTm5eYEBtGgL76jGu/pwMVp/L8PdECSCEWaIp7d4Mw7kuUrbUldK0oV0bNd6ZZjLiMiA==",
+      "version": "9.0.0",
+      "resolved": "https://registry.npmjs.org/env-ci/-/env-ci-9.0.0.tgz",
+      "integrity": "sha512-Q3cjr1tX9xwigprw4G8M3o7PIOO/1LYji6TyGsbD1WfMmD23etZvhgmPXJqkP788yH4dgSSK7oaIMuaayUJIfg==",
       "dev": true,
       "requires": {
-        "execa": "^6.1.0",
+        "execa": "^7.0.0",
         "java-properties": "^1.0.2"
-      },
-      "dependencies": {
-        "execa": {
-          "version": "6.1.0",
-          "resolved": "https://registry.npmjs.org/execa/-/execa-6.1.0.tgz",
-          "integrity": "sha512-QVWlX2e50heYJcCPG0iWtf8r0xjEYfz/OYLGDYH+IyjWezzPNxz63qNFOu0l4YftGWuizFVZHHs8PrLU5p2IDA==",
-          "dev": true,
-          "requires": {
-            "cross-spawn": "^7.0.3",
-            "get-stream": "^6.0.1",
-            "human-signals": "^3.0.1",
-            "is-stream": "^3.0.0",
-            "merge-stream": "^2.0.0",
-            "npm-run-path": "^5.1.0",
-            "onetime": "^6.0.0",
-            "signal-exit": "^3.0.7",
-            "strip-final-newline": "^3.0.0"
-          }
-        },
-        "human-signals": {
-          "version": "3.0.1",
-          "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-3.0.1.tgz",
-          "integrity": "sha512-rQLskxnM/5OCldHo+wNXbpVgDn5A17CUoKX+7Sokwaknlq7CdSnphy0W39GU8dw59XiCXmFXDg4fRuckQRKewQ==",
-          "dev": true
-        }
       }
     },
     "error-ex": {
@@ -8297,9 +8149,9 @@
       }
     },
     "fast-glob": {
-      "version": "3.2.12",
-      "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.12.tgz",
-      "integrity": "sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w==",
+      "version": "3.3.0",
+      "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.0.tgz",
+      "integrity": "sha512-ChDuvbOypPuNjO8yIDf36x7BlZX1smcUMTTcyoIjycexOxd6DFsKsg21qVBzEmr3G7fUKIRy2/psii+CIUt7FA==",
       "dev": true,
       "requires": {
         "@nodelib/fs.stat": "^2.0.2",
@@ -8307,17 +8159,6 @@
         "glob-parent": "^5.1.2",
         "merge2": "^1.3.0",
         "micromatch": "^4.0.4"
-      },
-      "dependencies": {
-        "glob-parent": {
-          "version": "5.1.2",
-          "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
-          "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
-          "dev": true,
-          "requires": {
-            "is-glob": "^4.0.1"
-          }
-        }
       }
     },
     "fastq": {
@@ -8439,18 +8280,26 @@
       }
     }
    },
+    "glob-parent": {
+      "version": "5.1.2",
+      "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
+      "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
+      "dev": true,
+      "requires": {
+        "is-glob": "^4.0.1"
+      }
+    },
     "globby": {
-      "version": "11.1.0",
-      "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz",
-      "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==",
+      "version": "13.2.0",
+      "resolved": "https://registry.npmjs.org/globby/-/globby-13.2.0.tgz",
+      "integrity": "sha512-jWsQfayf13NvqKUIL3Ta+CIqMnvlaIDFveWE/dpOZ9+3AMEJozsxDvKA02zync9UuvOM8rOXzsD5GqKP4OnWPQ==",
       "dev": true,
       "requires": {
-        "array-union": "^2.1.0",
         "dir-glob": "^3.0.1",
-        "fast-glob": "^3.2.9",
+        "fast-glob": "^3.2.11",
         "ignore": "^5.2.0",
         "merge2": "^1.4.1",
-        "slash": "^3.0.0"
+        "slash": "^4.0.0"
       }
     },
     "graceful-fs": {
@@ -8509,23 +8358,22 @@
       }
     },
     "http-proxy-agent": {
-      "version": "5.0.0",
-      "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz",
-      "integrity": "sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==",
+      "version": "7.0.0",
+      "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-7.0.0.tgz",
+      "integrity": "sha512-+ZT+iBxVUQ1asugqnD6oWoRiS25AkjNfG085dKJGtGxkdwLQrMKU5wJr2bOOFAXzKcTuqq+7fZlTMgG3SRfIYQ==",
       "dev": true,
       "requires": {
-        "@tootallnate/once": "2",
-        "agent-base": "6",
-        "debug": "4"
+        "agent-base": "^7.1.0",
+        "debug": "^4.3.4"
       }
     },
     "https-proxy-agent": {
-      "version": "5.0.1",
-      "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz",
-      "integrity": "sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==",
+      "version": "7.0.0",
+      "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.0.tgz",
+      "integrity": "sha512-0euwPCRyAPSgGdzD1IVN9nJYHtBhJwb6XPfbpQcYbPCwrBidX6GzxmchnaF4sfF/jPb74Ojx5g4yTg3sixlyPw==",
      "dev": true,
       "requires": {
-        "agent-base": "6",
+        "agent-base": "^7.0.2",
         "debug": "4"
       }
     },
@@ -8576,9 +8424,9 @@
       "dev": true
     },
     "into-stream": {
-      "version": "6.0.0",
-      "resolved": "https://registry.npmjs.org/into-stream/-/into-stream-6.0.0.tgz",
-      "integrity":
"sha512-XHbaOAvP+uFKUFsOgoNPRjLkwB+I22JFPFe5OjTkQ0nwgj6+pSjb4NmB6VMxaPshLiOf+zcpOCBQuLwC1KHhZA==", + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/into-stream/-/into-stream-7.0.0.tgz", + "integrity": "sha512-2dYz766i9HprMBasCMvHMuazJ7u4WzhJwo5kb3iPSiW/iRYV6uPari3zHoqZlnuaR7V1bEiNMxikhp37rdBXbw==", "dev": true, "requires": { "from2": "^2.3.0", @@ -8850,7 +8698,8 @@ "version": "4.2.12", "resolved": "https://registry.npmjs.org/marked/-/marked-4.2.12.tgz", "integrity": "sha512-yr8hSKa3Fv4D3jdZmtMMPghgVt6TWbk86WQaWhDloQjRSQhMMYCAro7jP7VDJrjjdV8pxVxMssXS8B8Y5DZ5aw==", - "dev": true + "dev": true, + "peer": true }, "marked-terminal": { "version": "5.1.1", @@ -9048,9 +8897,9 @@ } }, "node-fetch": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.9.tgz", - "integrity": "sha512-DJm/CJkZkRjKKj4Zi4BsKVZh3ValV5IR5s7LVZnW+6YMh0W1BfNA8XSs6DLMGYlId5F3KnA70uu2qepcR08Qqg==", + "version": "2.6.12", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.12.tgz", + "integrity": "sha512-C/fGU2E8ToujUivIO0H+tpQ6HWo4eEmchoPIoXtxCrVghxdKq+QOHqEZW7tuP3KlV3bC8FRMO5nMCC7Zm1VP6g==", "dev": true, "requires": { "whatwg-url": "^5.0.0" @@ -11162,12 +11011,12 @@ "dev": true }, "p-filter": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/p-filter/-/p-filter-2.1.0.tgz", - "integrity": "sha512-ZBxxZ5sL2HghephhpGAQdoskxplTwr7ICaehZwLIlfL6acuVgZPm8yBNuRAFBGEqtD/hmUeq9eqLg2ys9Xr/yw==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-filter/-/p-filter-3.0.0.tgz", + "integrity": "sha512-QtoWLjXAW++uTX67HZQz1dbTpqBfiidsB6VtQUC9iR85S120+s0T5sO6s+B5MLzFcZkrEd/DGMmCjR+f2Qpxwg==", "dev": true, "requires": { - "p-map": "^2.0.0" + "p-map": "^5.1.0" } }, "p-is-promise": { @@ -11177,10 +11026,13 @@ "dev": true }, "p-map": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/p-map/-/p-map-2.1.0.tgz", - "integrity": "sha512-y3b8Kpd8OAN444hxfBbFfj1FY/RjtTd8tzYwhUqNYXx0fXx2iX4maP4Qr6qhIKbQXI02wTLAda4fYUbDagTUFw==", - "dev": true + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/p-map/-/p-map-5.5.0.tgz", + "integrity": "sha512-VFqfGDHlx87K66yZrNdI4YGtD70IRyd+zSvgks6mzHPRNkoKy+9EKP4SFC77/vTTQYmRmti7dvqC+m5jBrBAcg==", + "dev": true, + "requires": { + "aggregate-error": "^4.0.0" + } }, "p-reduce": { "version": "3.0.0", @@ -11188,16 +11040,6 @@ "integrity": "sha512-xsrIUgI0Kn6iyDYm9StOpOeK29XM1aboGji26+QEortiFST1hGZaUQOLhtEbqHErPpGW/aSz6allwK2qcptp0Q==", "dev": true }, - "p-retry": { - "version": "4.6.2", - "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.2.tgz", - "integrity": "sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==", - "dev": true, - "requires": { - "@types/retry": "0.12.0", - "retry": "^0.13.1" - } - }, "p-try": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", @@ -11571,12 +11413,6 @@ "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", "dev": true }, - "retry": { - "version": "0.13.1", - "resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz", - "integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==", - "dev": true - }, "reusify": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", @@ -11599,20 +11435,20 @@ "dev": true }, "semantic-release": { - "version": "21.0.0", - "resolved": 
"https://registry.npmjs.org/semantic-release/-/semantic-release-21.0.0.tgz", - "integrity": "sha512-zks0jVk2Hbyhn014vshcwQ6e6gM9jDPr8SdujqfAzPJBvvvSXa8GHz/x+W0VaW2aBNawWFAlx6N45dp1H1XCCw==", + "version": "21.0.6", + "resolved": "https://registry.npmjs.org/semantic-release/-/semantic-release-21.0.6.tgz", + "integrity": "sha512-NDyosObAwUNzPpdf+mpL49Xy+5iYHjdWM34LBNdbdYv9vBLbw+eCCDihxcqPh+f9m4ZzlBrYCkHUaZv2vPGW9A==", "dev": true, "requires": { - "@semantic-release/commit-analyzer": "^9.0.2", - "@semantic-release/error": "^3.0.0", - "@semantic-release/github": "^8.0.0", + "@semantic-release/commit-analyzer": "^10.0.0", + "@semantic-release/error": "^4.0.0", + "@semantic-release/github": "^9.0.0", "@semantic-release/npm": "^10.0.2", - "@semantic-release/release-notes-generator": "^10.0.0", + "@semantic-release/release-notes-generator": "^11.0.0", "aggregate-error": "^4.0.1", "cosmiconfig": "^8.0.0", "debug": "^4.0.0", - "env-ci": "^8.0.0", + "env-ci": "^9.0.0", "execa": "^7.0.0", "figures": "^5.0.0", "find-versions": "^5.1.0", @@ -11621,7 +11457,7 @@ "hook-std": "^3.0.0", "hosted-git-info": "^6.0.0", "lodash-es": "^4.17.21", - "marked": "^4.1.0", + "marked": "^5.0.0", "marked-terminal": "^5.1.1", "micromatch": "^4.0.2", "p-each-series": "^3.0.0", @@ -11634,6 +11470,18 @@ "yargs": "^17.5.1" }, "dependencies": { + "@semantic-release/error": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@semantic-release/error/-/error-4.0.0.tgz", + "integrity": "sha512-mgdxrHTLOjOddRVYIYDo0fR3/v61GNN1YGkfbrjuIKg/uMgCd+Qzo3UAXJ+woLQQpos4pl5Esuw5A7AoNlzjUQ==", + "dev": true + }, + "marked": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/marked/-/marked-5.1.0.tgz", + "integrity": "sha512-z3/nBe7aTI8JDszlYLk7dDVNpngjw0o1ZJtrA9kIfkkHcIF+xH7mO23aISl4WxP83elU+MFROgahqdpd05lMEQ==", + "dev": true + }, "resolve-from": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", @@ -11777,9 +11625,9 @@ } }, "slash": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", - "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-4.0.0.tgz", + "integrity": "sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==", "dev": true }, "source-map": { @@ -12070,9 +11918,9 @@ "dev": true }, "url-join": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/url-join/-/url-join-4.0.1.tgz", - "integrity": "sha512-jk1+QP6ZJqyOiuEI9AEWQfju/nB2Pw466kbA0LEZljHwKeMgd9WrAEgEGxjPDD2+TNbbb37rTyhEfrCXfuKXnA==", + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/url-join/-/url-join-5.0.0.tgz", + "integrity": "sha512-n2huDr9h9yzd6exQVnH/jU5mr+Pfx08LRXXZhkLLetAMESRj+anQsTAh940iMrIetKAmry9coFuZQ2jY8/p3WA==", "dev": true }, "util-deprecate": { diff --git a/package.json b/package.json index 4a82d1e1..0a6fa26a 100644 --- a/package.json +++ b/package.json @@ -8,7 +8,7 @@ "@semantic-release/changelog": "^6.0.3", "@semantic-release/exec": "^6.0.3", "@semantic-release/git": "^10.0.1", - "conventional-changelog-conventionalcommits": "^5.0.0", - "semantic-release": "^21.0.0" + "conventional-changelog-conventionalcommits": "^6.1.0", + "semantic-release": "^21.0.6" } } diff --git a/poetry.lock b/poetry.lock index 5504ee8c..e8761f52 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,10 +1,9 @@ -# This file is automatically @generated by Poetry 1.4.2 and 
should not be changed by hand. +# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. [[package]] name = "anyio" version = "3.6.2" description = "High level compatibility layer for multiple asynchronous event loop implementations" -category = "dev" optional = false python-versions = ">=3.6.2" files = [ @@ -25,7 +24,6 @@ trio = ["trio (>=0.16,<0.22)"] name = "appnope" version = "0.1.3" description = "Disable App Nap on macOS >= 10.9" -category = "dev" optional = false python-versions = "*" files = [ @@ -37,7 +35,6 @@ files = [ name = "argon2-cffi" version = "21.3.0" description = "The secure Argon2 password hashing algorithm." -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -57,7 +54,6 @@ tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pytest"] name = "argon2-cffi-bindings" version = "21.2.0" description = "Low-level CFFI bindings for Argon2" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -95,7 +91,6 @@ tests = ["pytest"] name = "arrow" version = "1.2.3" description = "Better dates & times for Python" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -108,14 +103,13 @@ python-dateutil = ">=2.7.0" [[package]] name = "astroid" -version = "2.15.1" +version = "2.15.5" description = "An abstract syntax tree for Python with inference support." -category = "main" optional = false python-versions = ">=3.7.2" files = [ - {file = "astroid-2.15.1-py3-none-any.whl", hash = "sha256:89860bda98fe2bbd1f5d262229be7629d778ce280de68d95d4a73d1f592ad268"}, - {file = "astroid-2.15.1.tar.gz", hash = "sha256:af4e0aff46e2868218502789898269ed95b663fba49e65d91c1e09c966266c34"}, + {file = "astroid-2.15.5-py3-none-any.whl", hash = "sha256:078e5212f9885fa85fbb0cf0101978a336190aadea6e13305409d099f71b2324"}, + {file = "astroid-2.15.5.tar.gz", hash = "sha256:1039262575027b441137ab4a62a793a9b43defb42c32d5670f38686207cd780f"}, ] [package.dependencies] @@ -130,7 +124,6 @@ wrapt = [ name = "asttokens" version = "2.2.1" description = "Annotate AST trees with source code positions" -category = "dev" optional = false python-versions = "*" files = [ @@ -148,7 +141,6 @@ test = ["astroid", "pytest"] name = "attrs" version = "22.2.0" description = "Classes Without Boilerplate" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -167,7 +159,6 @@ tests-no-zope = ["cloudpickle", "cloudpickle", "hypothesis", "hypothesis", "mypy name = "backcall" version = "0.2.0" description = "Specifications for callback functions passed in to an API" -category = "dev" optional = false python-versions = "*" files = [ @@ -179,7 +170,6 @@ files = [ name = "beautifulsoup4" version = "4.11.2" description = "Screen-scraping library" -category = "dev" optional = false python-versions = ">=3.6.0" files = [ @@ -198,7 +188,6 @@ lxml = ["lxml"] name = "black" version = "23.3.0" description = "The uncompromising code formatter." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -247,7 +236,6 @@ uvloop = ["uvloop (>=0.15.2)"] name = "bleach" version = "6.0.0" description = "An easy safelist-based HTML-sanitizing tool." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -266,7 +254,6 @@ css = ["tinycss2 (>=1.1.0,<1.2)"] name = "blis" version = "0.7.9" description = "The Blis BLAS-like linear algebra library, as a self-contained C-extension." 
-category = "main" optional = false python-versions = "*" files = [ @@ -307,7 +294,6 @@ numpy = ">=1.15.0" name = "catalogue" version = "2.0.8" description = "Super lightweight function registries for your library" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -319,7 +305,6 @@ files = [ name = "certifi" version = "2022.12.7" description = "Python package for providing Mozilla's CA Bundle." -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -331,7 +316,6 @@ files = [ name = "cffi" version = "1.15.1" description = "Foreign Function Interface for Python calling C code." -category = "dev" optional = false python-versions = "*" files = [ @@ -408,7 +392,6 @@ pycparser = "*" name = "charset-normalizer" version = "3.1.0" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." -category = "main" optional = false python-versions = ">=3.7.0" files = [ @@ -493,7 +476,6 @@ files = [ name = "click" version = "8.1.3" description = "Composable command line interface toolkit" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -508,7 +490,6 @@ colorama = {version = "*", markers = "platform_system == \"Windows\""} name = "colorama" version = "0.4.6" description = "Cross-platform colored terminal text." -category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" files = [ @@ -520,7 +501,6 @@ files = [ name = "comm" version = "0.1.2" description = "Jupyter Python Comm implementation, for usage in ipykernel, xeus-python etc." -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -538,7 +518,6 @@ test = ["pytest"] name = "confection" version = "0.0.4" description = "The sweetest config system for Python" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -554,7 +533,6 @@ srsly = ">=2.4.0,<3.0.0" name = "coverage" version = "7.2.2" description = "Code coverage measurement for Python" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -621,7 +599,6 @@ toml = ["tomli"] name = "cymem" version = "2.0.7" description = "Manage calls to calloc/free through Cython" -category = "main" optional = false python-versions = "*" files = [ @@ -659,7 +636,6 @@ files = [ name = "debugpy" version = "1.6.6" description = "An implementation of the Debug Adapter Protocol for Python" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -686,7 +662,6 @@ files = [ name = "decorator" version = "5.1.1" description = "Decorators for Humans" -category = "dev" optional = false python-versions = ">=3.5" files = [ @@ -698,7 +673,6 @@ files = [ name = "defusedxml" version = "0.7.1" description = "XML bomb protection for Python stdlib modules" -category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" files = [ @@ -710,7 +684,6 @@ files = [ name = "docstring-parser" version = "0.15" description = "Parse Python docstrings in reST, Google and Numpydoc format" -category = "main" optional = false python-versions = ">=3.6,<4.0" files = [ @@ -722,7 +695,6 @@ files = [ name = "exceptiongroup" version = "1.1.1" description = "Backport of PEP 654 (exception groups)" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -737,7 +709,6 @@ test = ["pytest (>=6)"] name = "executing" version = "1.2.0" description = "Get the currently executing AST node of a frame, and other information" -category = "dev" optional 
= false python-versions = "*" files = [ @@ -752,7 +723,6 @@ tests = ["asttokens", "littleutils", "pytest", "rich"] name = "fastjsonschema" version = "2.16.3" description = "Fastest Python implementation of JSON schema" -category = "dev" optional = false python-versions = "*" files = [ @@ -767,7 +737,6 @@ devel = ["colorama", "json-spec", "jsonschema", "pylint", "pytest", "pytest-benc name = "fqdn" version = "1.5.1" description = "Validates fully-qualified domain names against RFC 1123, so that they are acceptable to modern bowsers" -category = "dev" optional = false python-versions = ">=2.7, !=3.0, !=3.1, !=3.2, !=3.3, !=3.4, <4" files = [ @@ -779,7 +748,6 @@ files = [ name = "ghp-import" version = "2.1.0" description = "Copy your docs directly to the gh-pages branch." -category = "dev" optional = false python-versions = "*" files = [ @@ -797,7 +765,6 @@ dev = ["flake8", "markdown", "twine", "wheel"] name = "griffe" version = "0.25.5" description = "Signatures for entire Python programs. Extract the structure, the frame, the skeleton of your project, to generate API documentation or find breaking changes in your API." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -815,7 +782,6 @@ async = ["aiofiles (>=0.7,<1.0)"] name = "idna" version = "3.4" description = "Internationalized Domain Names in Applications (IDNA)" -category = "main" optional = false python-versions = ">=3.5" files = [ @@ -827,7 +793,6 @@ files = [ name = "iniconfig" version = "2.0.0" description = "brain-dead simple config-ini parsing" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -839,7 +804,6 @@ files = [ name = "ipykernel" version = "6.21.3" description = "IPython Kernel for Jupyter" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -853,7 +817,7 @@ comm = ">=0.1.1" debugpy = ">=1.6.5" ipython = ">=7.23.1" jupyter-client = ">=6.1.12" -jupyter-core = ">=4.12,<5.0.0 || >=5.1.0" +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" matplotlib-inline = ">=0.1" nest-asyncio = "*" packaging = "*" @@ -873,7 +837,6 @@ test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", "pytest-asyncio" name = "ipython" version = "8.11.0" description = "IPython: Productive Interactive Computing" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -912,7 +875,6 @@ test-extra = ["curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.21)", "pa name = "ipython-genutils" version = "0.2.0" description = "Vestigial utilities from IPython" -category = "dev" optional = false python-versions = "*" files = [ @@ -924,7 +886,6 @@ files = [ name = "ipywidgets" version = "8.0.4" description = "Jupyter interactive widgets" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -946,7 +907,6 @@ test = ["jsonschema", "pytest (>=3.6.0)", "pytest-cov", "pytz"] name = "isoduration" version = "20.11.0" description = "Operations with ISO 8601 durations" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -961,7 +921,6 @@ arrow = ">=0.15.0" name = "jedi" version = "0.18.2" description = "An autocompletion tool for Python that can be used for text editors." -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -981,7 +940,6 @@ testing = ["Django (<3.1)", "attrs", "colorama", "docopt", "pytest (<7.0.0)"] name = "jinja2" version = "3.1.2" description = "A very fast and expressive template engine." 
-category = "main" optional = false python-versions = ">=3.7" files = [ @@ -999,7 +957,6 @@ i18n = ["Babel (>=2.7)"] name = "jsonpointer" version = "2.3" description = "Identify specific nodes in a JSON document (RFC 6901)" -category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -1011,7 +968,6 @@ files = [ name = "jsonschema" version = "4.17.3" description = "An implementation of JSON Schema validation for Python" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1039,7 +995,6 @@ format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339- name = "jupyter" version = "1.0.0" description = "Jupyter metapackage. Install all the Jupyter components in one go." -category = "dev" optional = false python-versions = "*" files = [ @@ -1060,7 +1015,6 @@ qtconsole = "*" name = "jupyter-client" version = "8.0.3" description = "Jupyter protocol implementation and client libraries" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -1069,7 +1023,7 @@ files = [ ] [package.dependencies] -jupyter-core = ">=4.12,<5.0.0 || >=5.1.0" +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" python-dateutil = ">=2.8.2" pyzmq = ">=23.0" tornado = ">=6.2" @@ -1083,7 +1037,6 @@ test = ["codecov", "coverage", "ipykernel (>=6.14)", "mypy", "paramiko", "pre-co name = "jupyter-console" version = "6.6.3" description = "Jupyter terminal console" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1095,7 +1048,7 @@ files = [ ipykernel = ">=6.14" ipython = "*" jupyter-client = ">=7.0.0" -jupyter-core = ">=4.12,<5.0.0 || >=5.1.0" +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" prompt-toolkit = ">=3.0.30" pygments = "*" pyzmq = ">=17" @@ -1108,7 +1061,6 @@ test = ["flaky", "pexpect", "pytest"] name = "jupyter-core" version = "5.3.0" description = "Jupyter core package. A base package on which Jupyter projects rely." -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -1129,7 +1081,6 @@ test = ["ipykernel", "pre-commit", "pytest", "pytest-cov", "pytest-timeout"] name = "jupyter-events" version = "0.6.3" description = "Jupyter Event System library" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1154,7 +1105,6 @@ test = ["click", "coverage", "pre-commit", "pytest (>=7.0)", "pytest-asyncio (>= name = "jupyter-server" version = "2.5.0" description = "The backend—i.e. core services, APIs, and REST endpoints—to Jupyter web applications." -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -1167,7 +1117,7 @@ anyio = ">=3.1.0" argon2-cffi = "*" jinja2 = "*" jupyter-client = ">=7.4.4" -jupyter-core = ">=4.12,<5.0.0 || >=5.1.0" +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" jupyter-events = ">=0.4.0" jupyter-server-terminals = "*" nbconvert = ">=6.4.4" @@ -1190,7 +1140,6 @@ test = ["ipykernel", "pre-commit", "pytest (>=7.0)", "pytest-console-scripts", " name = "jupyter-server-terminals" version = "0.4.4" description = "A Jupyter Server Extension Providing Terminals." 
-category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -1210,7 +1159,6 @@ test = ["coverage", "jupyter-server (>=2.0.0)", "pytest (>=7.0)", "pytest-cov", name = "jupyterlab-pygments" version = "0.2.2" description = "Pygments theme using JupyterLab CSS variables" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1222,7 +1170,6 @@ files = [ name = "jupyterlab-widgets" version = "3.0.5" description = "Jupyter interactive widgets for JupyterLab" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1234,7 +1181,6 @@ files = [ name = "jupytext" version = "1.14.5" description = "Jupyter notebooks as Markdown documents, Julia, Python or R scripts" -category = "dev" optional = false python-versions = "~=3.6" files = [ @@ -1257,7 +1203,6 @@ toml = ["toml"] name = "langcodes" version = "3.3.0" description = "Tools for labeling human languages with IETF language tags" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -1272,7 +1217,6 @@ data = ["language-data (>=1.1,<2.0)"] name = "lazy-object-proxy" version = "1.9.0" description = "A fast and thorough lazy object proxy." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1316,124 +1260,122 @@ files = [ [[package]] name = "levenshtein" -version = "0.20.9" +version = "0.21.1" description = "Python extension for computing string edit distances and similarities." -category = "main" optional = false python-versions = ">=3.6" files = [ - {file = "Levenshtein-0.20.9-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:105c239ec786750cd5136991c58196b440cc39b6acf3ec8227f6562c9a94e4b9"}, - {file = "Levenshtein-0.20.9-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2f7728bea7fe6dc55ceecde0dcda4287e74fe3b6733ad42530f46aaa8d2f81d0"}, - {file = "Levenshtein-0.20.9-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cc7eca755c13c92814c8cce8175524cf764ce38f39228b602f59eac58cfdc51a"}, - {file = "Levenshtein-0.20.9-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8a552e79d053dc1324fb90d342447fd4e15736f4cbc5363b6fbd5577f53dce9"}, - {file = "Levenshtein-0.20.9-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5474b2681ee0b7944fb1e7fe281cd44e2dfe75b03ba4558dca49c96fa0861b62"}, - {file = "Levenshtein-0.20.9-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:56e132c203b0dd8fc72a33e791c39ad0d5a25bcf24b130a1e202abbf489a3e75"}, - {file = "Levenshtein-0.20.9-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3badc94708ac05b405e795fde58a53272b90a9ee6099ecd54a345658b7b812e1"}, - {file = "Levenshtein-0.20.9-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48b9b3ae095b14dad7bc4bd219c7cd9113a7aa123a033337c85b00fe2ed565d3"}, - {file = "Levenshtein-0.20.9-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0d3a1f7328c91caeb1f857ddd2787e3f19d60cc2c688339d249ca8841da61454"}, - {file = "Levenshtein-0.20.9-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ef67c50428c99caf67d31bd209da21d9378da5f0cc3ad4f7bafb6caa78aee6f2"}, - {file = "Levenshtein-0.20.9-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:47f6d1592c0891f7355e38a302becd233336ca2f55f9a8be3a8635f946a6784f"}, - {file = "Levenshtein-0.20.9-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:2891019740e874f05e0349e9f27b6af8ad837b1612f42e9c90c296d54d1404fd"}, - {file = "Levenshtein-0.20.9-cp310-cp310-musllinux_1_1_x86_64.whl", hash = 
"sha256:c554704eec4f4ba742febdcc79a85491f8f9a1d493cb103bb2af18536d6cf122"}, - {file = "Levenshtein-0.20.9-cp310-cp310-win32.whl", hash = "sha256:7628e356b3f9c78ad7272c3b9137f0641a1368849e749ff6f2c8fe372795806b"}, - {file = "Levenshtein-0.20.9-cp310-cp310-win_amd64.whl", hash = "sha256:ba2bafe3511194a37044cae4e7d328cca70657933052691c37eba2ca428a379d"}, - {file = "Levenshtein-0.20.9-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7605a94145198d19fdaaa7e29c0f8a56ad719b12386f3ae8cd8ed4cb9fa6c2e4"}, - {file = "Levenshtein-0.20.9-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:29db4dabfad2ddf33c7986eb6fd525c7587cca4c4d9e187365cff0a5281f5a35"}, - {file = "Levenshtein-0.20.9-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:965336c1772a4fc5fb2686a2a0bfaf3455dced96f19f50f278da8bc139076d31"}, - {file = "Levenshtein-0.20.9-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67235753035ac898d6475c0b29540521018db2e0027a3c1deb9aa0af0a84fd74"}, - {file = "Levenshtein-0.20.9-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:120dca58136aee3d8c7b190e30db7b6a6eb9579ea5712df84ad076a389801743"}, - {file = "Levenshtein-0.20.9-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6496ea66a6f755e48c0d82f1eee396d16edcd5592d4b3677d26fa789a636a728"}, - {file = "Levenshtein-0.20.9-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0af20327acc2c904d11611cb3a0d8d17f80c279a12e0b84189eafc35297186d"}, - {file = "Levenshtein-0.20.9-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:34d2f891ef53afbab6cf2eeb92ff13151884d17dc80a2d6d3c7ae74d7738b772"}, - {file = "Levenshtein-0.20.9-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:2ab9c72380582bf4745d1c5b055b1df0c85f7a980a04bd7603a855dd91478c0f"}, - {file = "Levenshtein-0.20.9-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6de13be3eb5ac48053fb1635a7b4daa936b9114ad4b264942e9eb709fcaa41dd"}, - {file = "Levenshtein-0.20.9-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:a9fc296860588251d8d72b4f4637cca4eef7351e042a7a23d44e6385aef1e160"}, - {file = "Levenshtein-0.20.9-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:35777b20fe35858248c22da37984469e6dd1278f55d17c53378312853d5d683d"}, - {file = "Levenshtein-0.20.9-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6b9e0642ddb4c431f77c38cec9edbd0317e26c3f37d072ccf281ab58926dce69"}, - {file = "Levenshtein-0.20.9-cp311-cp311-win32.whl", hash = "sha256:f88ec322d86d3cc9d3936dbf6b421ad813950c2658599d48ac4ede59f2a6047e"}, - {file = "Levenshtein-0.20.9-cp311-cp311-win_amd64.whl", hash = "sha256:2907a6888455f9915d5b656f5d058f63eaf6063b2c7f0f1ff6bc05706ae5bc39"}, - {file = "Levenshtein-0.20.9-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:6bcebc79760be08488cb921732af34ade6abc7476a94866881c68b45ec4b6c82"}, - {file = "Levenshtein-0.20.9-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:47d8d4f3825d1d8f3b19382537a8536e689cf57aaa224d2cb4f44cf844811885"}, - {file = "Levenshtein-0.20.9-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9d40e18a5817ee7f0675401613a26c492fd4ea68d2103c1480fb5a6ab1b8763d"}, - {file = "Levenshtein-0.20.9-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4d258f3d44f6bac17f33002fea34570049507d3476c3716b5267170c666b20b4"}, - {file = "Levenshtein-0.20.9-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:c621e0c389546147ed43c33ca4168de0f91c920508ab8a94a400835fa084f486"}, - {file = "Levenshtein-0.20.9-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57a31527dc7994353091626e62b7d82d53290cb00df48d3e5d29cb291fb4c03c"}, - {file = "Levenshtein-0.20.9-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:129c8f192e656b7c2c543bf0d704d677720771b8bc2f30c50db02fbc2001bac2"}, - {file = "Levenshtein-0.20.9-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:5a01fca58255be6bf724a40af2575d7cf644c099c28a00d1f5f6a81675e60e7d"}, - {file = "Levenshtein-0.20.9-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:4c13749ea39a228f05d5bd9d473e76f726fc2dcd493cafc322f740921a6eeffb"}, - {file = "Levenshtein-0.20.9-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:69daa0f8eefa5b947255a81346741ed86fe7030e0909741dbd978e38b30da3fd"}, - {file = "Levenshtein-0.20.9-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:fcc78a73ed423bbb09ac902dd2e1ff1094d159d1c6766e5e52da5f376a4cba18"}, - {file = "Levenshtein-0.20.9-cp36-cp36m-win32.whl", hash = "sha256:d82ae57982a9f33c55778f1f0f63d5e51e291aee236abed3b90497578b944202"}, - {file = "Levenshtein-0.20.9-cp36-cp36m-win_amd64.whl", hash = "sha256:4082379b406752fc1173ed1f8c3a122c5d5491e10e564ed721602e4e049e3d4c"}, - {file = "Levenshtein-0.20.9-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cb499783b7126e6fc45c39ab34c8114148425c5d975b1ce35e6c47c0eda58a94"}, - {file = "Levenshtein-0.20.9-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ce747b296aad3bd8a563cccf2119cf37bf72f668076bfdad6ec55f0a0596dd9"}, - {file = "Levenshtein-0.20.9-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1347c3ebbe8f42f7a487e8d23a95bde6529379b4939ad51d32246d001565c499"}, - {file = "Levenshtein-0.20.9-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a2f1c1e8360603a6da29416da61d1907a27656843e269413091c8c3a3e6286e"}, - {file = "Levenshtein-0.20.9-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73c1caaedbee3617fd29139aac8dab7743776b59c3c1fed2790308ecb43c7b25"}, - {file = "Levenshtein-0.20.9-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a1f24133df69f8b618fc508d6023695130ad3c3c8968ef43aaeca21835eb337a"}, - {file = "Levenshtein-0.20.9-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:cf7260722f8170c09af5cfa714bb45626a4dfc85d71d1c1c9c52c2a6901cc501"}, - {file = "Levenshtein-0.20.9-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:01668178fd9244df290db0340293982fe7641162a12a35ad9ffb3fe145ce6377"}, - {file = "Levenshtein-0.20.9-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:1e46f9d3483dc4991ac60ff3711b0d40f93e352cc8edc16b68df57ccc472bd6c"}, - {file = "Levenshtein-0.20.9-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:680cd250dc1875eb80cf2a0cca742bd13f6f9ab11c48317244fcc483eba1dd67"}, - {file = "Levenshtein-0.20.9-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2346e2f7dfbbc2936bd81e19f7734984e72486ffc086760c897b39b9f674b2fa"}, - {file = "Levenshtein-0.20.9-cp37-cp37m-win32.whl", hash = "sha256:7f31bcf257fec9719d0d97185c419d315f6f20a194f0b442919e352d19418b2e"}, - {file = "Levenshtein-0.20.9-cp37-cp37m-win_amd64.whl", hash = "sha256:48262bc9830ad60de96411fcb2e96a522c7206e7069169e04d89dd79364a7722"}, - {file = "Levenshtein-0.20.9-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:eba5696e1f8e8da225498fd1d743886d639400cafd0e5be3c553978cbb54c345"}, - {file = 
"Levenshtein-0.20.9-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:679333188f9791c85109d2981e97e8721a99b2b975b5c52d16aca50ac9c70757"}, - {file = "Levenshtein-0.20.9-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:06c9cfc61cf66833692d1ed258ec5a0871221b0779f1281c32a10348c492e2c5"}, - {file = "Levenshtein-0.20.9-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5d80d949168df406f2ac9ade1a5d0419cef0a8df611c8c2efe88f0248c9d0c0"}, - {file = "Levenshtein-0.20.9-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9275c6e601ff7f659116e2235e8585950c9c39d72504006077be85bf27950b35"}, - {file = "Levenshtein-0.20.9-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6414eea342d9632045e12b66bef043dbc6557189a283dc4dcc5966f63fa48998"}, - {file = "Levenshtein-0.20.9-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56571c58700600a382ecdf3f9efcb132ed16a0476cbb4e23a9478ab0ae788fd9"}, - {file = "Levenshtein-0.20.9-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7ccb76ffd9b851384f9cf1595b90b17cae46f0ab895e234de11ea48f9d9f73a"}, - {file = "Levenshtein-0.20.9-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:109172943cff7fb10f28a9eb819eb3eaf9c88fe38661fb1d0f230a8ae68a615c"}, - {file = "Levenshtein-0.20.9-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:534c8bbdfd033fa20575d57332d9ac0447b5afbeca7db975ba169762ece2051f"}, - {file = "Levenshtein-0.20.9-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:381a725963c392585135654caa3c7fc32cb1755ed977fb9db72e8838fee261be"}, - {file = "Levenshtein-0.20.9-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:7e4a44b1223980a9880e6f2bbf19121a125928580df9e4e81207199190343e11"}, - {file = "Levenshtein-0.20.9-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fc0ced58ee6d07351cde140a7ec88e5f2ceb053c805af1f90514d21914d21cad"}, - {file = "Levenshtein-0.20.9-cp38-cp38-win32.whl", hash = "sha256:5eec0868ffcd825564dd5e3399305eaa159220554d1aedbff13af0de1fe01f6c"}, - {file = "Levenshtein-0.20.9-cp38-cp38-win_amd64.whl", hash = "sha256:e9db476e40a3aa184631d102b716a019f70837eb0fcdd5b5d1504f099f91359c"}, - {file = "Levenshtein-0.20.9-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d5a20ecc20a09a32c72128c43d7df23877a2469b3c17780ae83f9a9d55873c08"}, - {file = "Levenshtein-0.20.9-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8b7b772f2f62a19a15ccb1b09c6c7754ca7430bb7e19d4ca4ff232958786873b"}, - {file = "Levenshtein-0.20.9-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:af92326b90ea6fe4521cf6a5dfe450e21150393c573ef3ad9ee446f1009fbfbd"}, - {file = "Levenshtein-0.20.9-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b48554dad328e198a636f937e2f4c057aac8e4bfcb8467b10e0f5daa94307b17"}, - {file = "Levenshtein-0.20.9-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:82304821e128d5453d1755d1c2f3d9cdf75e9def3517cf913b09df174e20283b"}, - {file = "Levenshtein-0.20.9-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2052357c5da195ede7dbc81a4e3408ebd6374a1ff1b86a0a9d8b8ce9562b32c3"}, - {file = "Levenshtein-0.20.9-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:44d60c6b47ccd6841c990418f7f4f58c28f7da9b07b81eaafc99b836cf351df1"}, - {file = "Levenshtein-0.20.9-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8dc2194c917e4466cb604580b16e42286f04e3fe0424489459e68f0834f5c527"}, - {file = 
"Levenshtein-0.20.9-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bb1e20965d759d89318cac7ff7eb045eb1fafcb5c3fa3047a23f6ae20c810ad7"}, - {file = "Levenshtein-0.20.9-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:74e959035da10a54e7a2eee28408eff672297ce96cdadd6f4a2f269a06e395c4"}, - {file = "Levenshtein-0.20.9-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:4a441b23d9704f57eb34af6a300ae5c335b9e77e6a065ada36ca69d6fc582af9"}, - {file = "Levenshtein-0.20.9-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:f59470c49114a5da064712a427317f2b1fa5bb89aa2dfd0e300f8289e26aec28"}, - {file = "Levenshtein-0.20.9-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:06191f5d0527e3224107aea260b5cffc8a78722e0efb4e793f0e45c449b813a2"}, - {file = "Levenshtein-0.20.9-cp39-cp39-win32.whl", hash = "sha256:3235c461904fe94b4f62fee78a1658c1316344411c81b02400c27d692a893f8f"}, - {file = "Levenshtein-0.20.9-cp39-cp39-win_amd64.whl", hash = "sha256:8b852def43d165c2f2b468239d66b847d9e6f52a775fc657773ced04d26062bd"}, - {file = "Levenshtein-0.20.9-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:f674cc75f127692525563155e500a3fa16aaf24dafd33a9bcda46e2979f793a1"}, - {file = "Levenshtein-0.20.9-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a34e3fd21acb31fcd29a0c8353dca74dfbb59957210a6f142505907a9dff3d59"}, - {file = "Levenshtein-0.20.9-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0ddddf2beafd1a2e17a87f80be562a7f7478e6098ccfc15de4c879972dfa2f9"}, - {file = "Levenshtein-0.20.9-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9649af1a896a4a7fc7f6f1fd093e8a92f463297f56c7bd0f8d7d16dfabeb236d"}, - {file = "Levenshtein-0.20.9-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:d7bd7f25336849027fbe5ed32b6ffd404436727d78a014e348dcd17347c73fd8"}, - {file = "Levenshtein-0.20.9-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0371d996ae81089296f42b6e886c7bf138d1cb0f002b0c724a9e5d689b29b5a0"}, - {file = "Levenshtein-0.20.9-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7e00e2fda9f225b5f4537647f6195cf220d468532739d3390eaf082b1d76c87"}, - {file = "Levenshtein-0.20.9-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1600f5ebe2f2aebf13e88cf488ec2e5ce25f7a42b5846335018693baf4ea63bd"}, - {file = "Levenshtein-0.20.9-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bcd59fcf06aaedda98da185ec289dc2c2c9922ce789f6a9c101709d4a22cac9"}, - {file = "Levenshtein-0.20.9-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:1549e307028fa5c3a8cf28ae8bcb1f6072df2abf7f36b9d7adf7fd60690fe372"}, - {file = "Levenshtein-0.20.9-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:795f2e95d09a33c66c73cd49be3ee632fb4b8c41be72c0cb8df29a329ce7d111"}, - {file = "Levenshtein-0.20.9-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:726bfb361d3b6786bea31392752f0ffcca568db7dc3f1e274f1b529489b8ad05"}, - {file = "Levenshtein-0.20.9-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e0fd315132786375de532355fa06b2f11c4b4af5784b7e064dc54b6ee0c3281"}, - {file = "Levenshtein-0.20.9-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0674bc0549d5ea9edb934b3b03a160a116cc410feb5739a51f9c4f618ee674e3"}, - {file = "Levenshtein-0.20.9-pp39-pypy39_pp73-win_amd64.whl", hash = 
"sha256:1ef8f3ecdfca5d6f0538226338d58617270439a1cc9b6cacb30a388984bb1608"}, - {file = "Levenshtein-0.20.9.tar.gz", hash = "sha256:70a8ad5e28bb76d87da1eb3f31de940836596547d6d01317c2289f5b7cd0b0ea"}, -] - -[package.dependencies] -rapidfuzz = ">=2.3.0,<3.0.0" + {file = "Levenshtein-0.21.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:59e5054c9dea821840af4623a4059c8f0ae56548a5eae8b9c7aaa0b3f1e33340"}, + {file = "Levenshtein-0.21.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:11694c6f7119d68cc199ff3b1407560c0efb0cc49f288169f28b2e032ee03cda"}, + {file = "Levenshtein-0.21.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f5f7ce639bea0f5e95a1f71963624b85521a39928a2a1bb0e66f6180facf5969"}, + {file = "Levenshtein-0.21.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39e8a1866325b6d54de4e7d1bffffaf4b4c8cbf0988f47f0f2e929edfbeb870d"}, + {file = "Levenshtein-0.21.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ed73d619e203aad54e2e6119a2b58b7568a36bd50a547817d13618ea0acf4412"}, + {file = "Levenshtein-0.21.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:50fbe01be99554f644657c32a9e3085369d23e8ccc540d855c683947d3b48b67"}, + {file = "Levenshtein-0.21.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675ba3afaa9e8ec393eb1eeee651697036e8391be54e6c28eae4bfdff4d5e64e"}, + {file = "Levenshtein-0.21.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c89a5ac319a80c131ca8d499ae0f7a91d4dd1dc3b2e9d8b095e991597b79c8f9"}, + {file = "Levenshtein-0.21.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:f9e3a5f4386c8f1811153f309a0ba3dc47d17e81a6dd29aa22d3e10212a2fd73"}, + {file = "Levenshtein-0.21.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ea042ba262ea2a95d93c4d2d5879df956cf6c85ce22c037e3f0d4491182f10c5"}, + {file = "Levenshtein-0.21.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:622bc670b906c4bf219755625e9fa704ff07c561a90f1aa35f3f2d8ecd3ec088"}, + {file = "Levenshtein-0.21.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:f0e51ff6d5665884b0e39b4ae0ef4e2d2d0174147147db7a870ddc4123882212"}, + {file = "Levenshtein-0.21.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cc8eb12c48598b20b4b99128bc2bd62974dfb65204ceb37807480788b1e66e64"}, + {file = "Levenshtein-0.21.1-cp310-cp310-win32.whl", hash = "sha256:04d338c9153ddf70a32f324cf9f902fe94a6da82122b8037ccde969d4cc0a94b"}, + {file = "Levenshtein-0.21.1-cp310-cp310-win_amd64.whl", hash = "sha256:5a10fc3be2bfb05b03b868d462941e4099b680b7f358a90b8c6d7d5946e9e97c"}, + {file = "Levenshtein-0.21.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:938581ba87b306675bc41e21c2b2822a9eb83fb1a0e4a4903b7398d7845b22e3"}, + {file = "Levenshtein-0.21.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:06da6c47aa459c725ee90dab467cd2f66956c5f9a43ddb51a0fe2496960f1d3e"}, + {file = "Levenshtein-0.21.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:eea308d98c64dbea48ac351011c4adf66acd936c4de2bf9955826ba8435197e2"}, + {file = "Levenshtein-0.21.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a51974fcb8a94284325cb88b474b76227532a25b035938a46167bebd1646718e"}, + {file = "Levenshtein-0.21.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:87edb05fc6e4eb14008433f02e89815a756fe4ecc32d7180bb757f26e4161e06"}, + {file = "Levenshtein-0.21.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:aee4f570652ad77961e5ab871d11fd42752e7d2117b08324a0c8801a7ee0a7c5"}, + {file = "Levenshtein-0.21.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:43a06b0b492e0d936deff751ad4757786ba7cb5eee510d53b6dfe92c924ff733"}, + {file = "Levenshtein-0.21.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:952e72f173a65f271dfee102b5571004b6594d4f199864ddead77115a2c147fd"}, + {file = "Levenshtein-0.21.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d3f855669e1399597f7a2670310cf20fc04a35c6c446dd70320398e9aa481b3d"}, + {file = "Levenshtein-0.21.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:ca992783feaf1d6e25403340157fb584cf71371b094a575134393bba10b974fa"}, + {file = "Levenshtein-0.21.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:20361f42f6e7efa5853f69a41a272e9ecb90da284bec4312e42b58fa42b9a752"}, + {file = "Levenshtein-0.21.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:9bcb3abbe97975cc6a97baf24a3b6e0491472ecedbc0247a41eb2c8d73ecde5d"}, + {file = "Levenshtein-0.21.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:72b0b84adc52f4cf970a1bb276e76e115b30d693d6dbcd25fca0bcee85ca7cc7"}, + {file = "Levenshtein-0.21.1-cp311-cp311-win32.whl", hash = "sha256:4217ae380f42f825862eb8e2f9beca627fe9ab613f36e206842c003bb1affafc"}, + {file = "Levenshtein-0.21.1-cp311-cp311-win_amd64.whl", hash = "sha256:12bb3540e021c73c5d8796ecf8148afd441c4471731924a112bc31bc25abeabf"}, + {file = "Levenshtein-0.21.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:a0fa251b3b4c561d2f650d9a61fb8980815492bb088a0a521236995a1872e171"}, + {file = "Levenshtein-0.21.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4bf11b89d8d7a7707ae5cac1ef86ac4ff78491482df037289470db8f0378043"}, + {file = "Levenshtein-0.21.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91dca7085aa358da71fa50682fc8ff7e21365c99ef17dc1962a7bbf488003528"}, + {file = "Levenshtein-0.21.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f4f187f0929a35b6ddabc1324161e8c73ddbd4a7747249f10ec9ceaa793e904f"}, + {file = "Levenshtein-0.21.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d997da10fdf1a82e208fd1b05aba40705ca3f053919c84d2e952141d33e3ab3"}, + {file = "Levenshtein-0.21.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ed8f99e4e4ba8a43bb4fe0255606724f22069405fa1e3be679a2d90f74770e5"}, + {file = "Levenshtein-0.21.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:5acb7e84ccd619dcff6e04928fa8d8cc24f55bb2c9cdfe96620ed85b0a82a7c7"}, + {file = "Levenshtein-0.21.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:62dca15301bdba4ec7fcf53c39dd8d9c198194990cf035def3f47b7cb9c3213e"}, + {file = "Levenshtein-0.21.1-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:832951ad7b5ee0df8152f239a9fc602322da055264459dcf4d50d3ed68e68045"}, + {file = "Levenshtein-0.21.1-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:e8ab4d5acdd3ac17161539d9f2ea764497dc269dcd8dc722ae4a394c7b64ae7f"}, + {file = "Levenshtein-0.21.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:3c13450450d537ec7ede3781be72d72db37cb131943148c8ada58b34e143fc6f"}, + {file = "Levenshtein-0.21.1-cp36-cp36m-win32.whl", hash = "sha256:267ad98befffeed90e73b8c644a297027adb81f61044843aeade7b4a44ccc7d7"}, + {file = "Levenshtein-0.21.1-cp36-cp36m-win_amd64.whl", hash = 
"sha256:d66d8f3ebde14840a310a557c8f69eed3e153f2477747365355d058208eea515"}, + {file = "Levenshtein-0.21.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:78d0fb5faef0413864c1b593e5261a840eaa47842b0fa4af7be4c09d90b24a14"}, + {file = "Levenshtein-0.21.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dda976c1dae2a0b41a109facc48d1d242c7acb30ab4c04d8421496da6e153aa"}, + {file = "Levenshtein-0.21.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1dc54aeb02f38a36f16bca6b0f9d07462686d92716424d9a4a3fdd11f3624528"}, + {file = "Levenshtein-0.21.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:463fd7558f25c477c7e4a59af35c661e133473f62bb02ed2c07c9c95e1c2dc66"}, + {file = "Levenshtein-0.21.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f00495a80c5850466f0a57ea874761f78079702e28b63a1b6573ad254f828e44"}, + {file = "Levenshtein-0.21.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:31aa08e8ddac402edd530aaf708ab085fea7299c499404989eabfde143377911"}, + {file = "Levenshtein-0.21.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:9e96217a7c6a7d43071c830b1353a3ee669757ae477673f0fd3e3a97def6d410"}, + {file = "Levenshtein-0.21.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:d83b8c0ce41e410af143bd3abef94e480d143fdb83e60a01bab9069bf565dada"}, + {file = "Levenshtein-0.21.1-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:309f134f3d42fa7df7efbbd7975f2331de8c36da3ebdb3fad59abae84268abba"}, + {file = "Levenshtein-0.21.1-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:267bc6725506571fd3c03afcc871fa5cbf3d2cb6e4bd11043790fa60cbb0f8a4"}, + {file = "Levenshtein-0.21.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:4a6cd85ac5f7800e8127b3194fa02c59be735b6bdfe55b8516d094652235e038"}, + {file = "Levenshtein-0.21.1-cp37-cp37m-win32.whl", hash = "sha256:13e87517ce788d71deaa73e37332a67c4085c13e58ea3a0218092d555d1872ce"}, + {file = "Levenshtein-0.21.1-cp37-cp37m-win_amd64.whl", hash = "sha256:918f2e0f590cacb30edb88e7eccbf71b340d5f080c9e69009f1f00dc24810a67"}, + {file = "Levenshtein-0.21.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:d17c2ee8aa380c012b3ba015b87502934662c51b7609ef17366c76863e9551d6"}, + {file = "Levenshtein-0.21.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ee847d3e49870e914074fd31c069a1aaba6f71bee650d41de48e7e4b11671bf0"}, + {file = "Levenshtein-0.21.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8d01425bd54c482ccbbc6d953633450a2bdbb7d12450d9eeba6073a6d0f06a3c"}, + {file = "Levenshtein-0.21.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bff4f236d1b6c556a77975812a4d51071181721f3a29c08b42e5c4aa11730957"}, + {file = "Levenshtein-0.21.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:35a603d952e9f286fe8053332862c8cff426f5d8a85ee962c3a0f597f4c463c4"}, + {file = "Levenshtein-0.21.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9546ded45fb3cf8773ade9c91de164c6cb2cb4927516289abd422a262e81906c"}, + {file = "Levenshtein-0.21.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:79259b10f105f78853210d8769cf77ca55dac8c368dca33b4c10ffa8965e2543"}, + {file = "Levenshtein-0.21.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:41e0e539638a27b5e90a5d46679375f93a1cb65cf06efe7c413cf76f71d3d467"}, + {file = 
"Levenshtein-0.21.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ccd0b89300a25decdb34d7c4efe2a971438015f552eeb416b8da12918cb3edc0"}, + {file = "Levenshtein-0.21.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ef365ec78938597623d4fb96c8b0db423ab484fcfc00fae44c34b738b1eb1924"}, + {file = "Levenshtein-0.21.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:e701b9dfb121faf71b0c5757485fc49e1b511b7b8a80034aa1f580488f8f872e"}, + {file = "Levenshtein-0.21.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:e4c2fe1f49f1d8476fe44e243569d775c5454dca70a13be568430d2d2d760ea2"}, + {file = "Levenshtein-0.21.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:40a5e38d0c3e488d1dca5dc9c2691c000764813d4006c243f2ebd39e0b331e95"}, + {file = "Levenshtein-0.21.1-cp38-cp38-win32.whl", hash = "sha256:6c08879d0cf761cd750e976fda67bcc23cf1e485eaa030942e6628b876f4c6d8"}, + {file = "Levenshtein-0.21.1-cp38-cp38-win_amd64.whl", hash = "sha256:248348e94dee05c787b44f16533a366ec5bf8ba949c604ad0db69d0c872f3539"}, + {file = "Levenshtein-0.21.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3824e9f75ec9f373fc8b4df23eae668918953487f5ff06db282ddcb3f9c802d2"}, + {file = "Levenshtein-0.21.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2e2ed817fa682243ef2e8a2728fcd0f9352d4e5edd104db44862d0bb55c75a7e"}, + {file = "Levenshtein-0.21.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:94a6ffd7257d12c64de34bc9f801a211e2daa624ec276305f8c67963a9896efa"}, + {file = "Levenshtein-0.21.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6833f8cefb96b8ccac457ad421866a74f4de973e7001699fcbbbe9ccb59a5c66"}, + {file = "Levenshtein-0.21.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c8126d2b51621483823c6e31d16bc1f5a964ae976aab4f241bbe74ed19d93770"}, + {file = "Levenshtein-0.21.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:58eaab403b77e62e096cbcbaf61728c8736f9f7a3e36a58fb663461e5d70144f"}, + {file = "Levenshtein-0.21.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47e6d66fe0110fd8e6efb1939d686099170c27b3ca838eab0c215f0781f05f06"}, + {file = "Levenshtein-0.21.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f5a1f28b34a15dd2d67bcc324f6661df8cfe66d6ec7ee7a64e921af8ae4c39b7"}, + {file = "Levenshtein-0.21.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c37609f4e460e570810ec5176c5cdf91c494a9979638f7fef5fd345597245d17"}, + {file = "Levenshtein-0.21.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:656c70814280c4002af89112f1457b6ad24c42dfba58dcb2047a249ae8ccdd04"}, + {file = "Levenshtein-0.21.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:863d507cba67de2fa66d5501ed1bc5029363d2b393662ac7d740dd0330c66aba"}, + {file = "Levenshtein-0.21.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:9437c2342937decf3cf5ac79d0b9497734897c0a09dc813378c97f2916b7aa76"}, + {file = "Levenshtein-0.21.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a1cd48db3d03adb88bf71b45de77b9720f96d3b9d5ab7a32304352baec482689"}, + {file = "Levenshtein-0.21.1-cp39-cp39-win32.whl", hash = "sha256:023dffdde576639e48cab3cc835bfaf9c441df7a8e2829bf20104868db6e4f72"}, + {file = "Levenshtein-0.21.1-cp39-cp39-win_amd64.whl", hash = "sha256:dcc712696d4332962ecab6e4df40d5126d7379c6612e6058ee2e9d3f924387e3"}, + {file = "Levenshtein-0.21.1-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9a8d60084e1c9e87ae247c601e331708de09ed23219b5e39af7c8e9115ab8152"}, + {file = 
"Levenshtein-0.21.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffa6762f8ef1e7dfba101babe43de6edc541cbe64d33d816314ac67cd76c3979"}, + {file = "Levenshtein-0.21.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eec8a1eaaeadc217c15bc77d01bb29e146acdae73a0b2e9df1ad162263c9752e"}, + {file = "Levenshtein-0.21.1-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5da0e2dbddb98da890fb779823df991ad50f184b3d986b8c68784eecbb087f01"}, + {file = "Levenshtein-0.21.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:edac6490f84e8a0456cb40f6729d4199311ce50ca0ea4958572e1b7ea99f546c"}, + {file = "Levenshtein-0.21.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:b33e2cbaca6f7d01092a28711605568dbc08a3bb7b796d8986bf5d0d651a0b09"}, + {file = "Levenshtein-0.21.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69a430ab564d286f309c19f7abed34fce9c144f39f984c609ee690dd175cc421"}, + {file = "Levenshtein-0.21.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f30474b2906301594c8fb64cb7492c6978290c466a717c4b5316887a18b77af5"}, + {file = "Levenshtein-0.21.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9817dca597abde9fc9571d56a7eca8bd667e9dfc0867b190f1e8b43ce4fde761"}, + {file = "Levenshtein-0.21.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:7d7e00e8cb45981386df9d3f99073ba7de59bdb739069766b32906421bb1026b"}, + {file = "Levenshtein-0.21.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c9a072cb0f6e90092c4323cd7731eb539a79ac360045dbe3cc49a123ba381fc5"}, + {file = "Levenshtein-0.21.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d880a87aca186342bc2fe16b064c3ed434d2a0c170c419f23b4e00261a5340a"}, + {file = "Levenshtein-0.21.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f282711a220d1bdf245da508e1fefdf7680d1f7482a094e37465674a7e6985ae"}, + {file = "Levenshtein-0.21.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cdba9f8a7a98b0c4c0bc004b811fb31a96521cd264aeb5375898478e7703de4d"}, + {file = "Levenshtein-0.21.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:b2410469cc8fd0f42aa00e63063c42f8aff501996cd5424a5c904739bdaaf4fe"}, + {file = "Levenshtein-0.21.1.tar.gz", hash = "sha256:2e4fc4522f9bf73c6ab4cedec834783999b247312ec9e3d1435a5424ad5bc908"}, +] + +[package.dependencies] +rapidfuzz = ">=2.3.0,<4.0.0" [[package]] name = "markdown" version = "3.3.7" description = "Python implementation of Markdown." -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -1448,7 +1390,6 @@ testing = ["coverage", "pyyaml"] name = "markdown-it-py" version = "2.2.0" description = "Python port of markdown-it. Markdown parsing, done right!" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1473,7 +1414,6 @@ testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] name = "markupsafe" version = "2.1.2" description = "Safely add untrusted strings to HTML/XML markup." 
-category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1533,7 +1473,6 @@ files = [ name = "matplotlib-inline" version = "0.1.6" description = "Inline Matplotlib backend for Jupyter" -category = "dev" optional = false python-versions = ">=3.5" files = [ @@ -1548,7 +1487,6 @@ traitlets = "*" name = "mdit-py-plugins" version = "0.3.5" description = "Collection of plugins for markdown-it-py" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1568,7 +1506,6 @@ testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] name = "mdurl" version = "0.1.2" description = "Markdown URL utilities" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1580,7 +1517,6 @@ files = [ name = "mergedeep" version = "1.3.4" description = "A deep merge function for 🐍." -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -1592,7 +1528,6 @@ files = [ name = "mistune" version = "2.0.5" description = "A sane Markdown parser with useful plugins and renderers" -category = "dev" optional = false python-versions = "*" files = [ @@ -1602,14 +1537,13 @@ files = [ [[package]] name = "mkdocs" -version = "1.4.2" +version = "1.4.3" description = "Project documentation with Markdown." -category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "mkdocs-1.4.2-py3-none-any.whl", hash = "sha256:c8856a832c1e56702577023cd64cc5f84948280c1c0fcc6af4cd39006ea6aa8c"}, - {file = "mkdocs-1.4.2.tar.gz", hash = "sha256:8947af423a6d0facf41ea1195b8e1e8c85ad94ac95ae307fe11232e0424b11c5"}, + {file = "mkdocs-1.4.3-py3-none-any.whl", hash = "sha256:6ee46d309bda331aac915cd24aab882c179a933bd9e77b80ce7d2eaaa3f689dd"}, + {file = "mkdocs-1.4.3.tar.gz", hash = "sha256:5955093bbd4dd2e9403c5afaf57324ad8b04f16886512a3ee6ef828956481c57"}, ] [package.dependencies] @@ -1632,7 +1566,6 @@ min-versions = ["babel (==2.9.0)", "click (==7.0)", "colorama (==0.4)", "ghp-imp name = "mkdocs-autorefs" version = "0.4.1" description = "Automatically link across pages in MkDocs." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1648,7 +1581,6 @@ mkdocs = ">=1.1" name = "mkdocs-exclude" version = "1.0.2" description = "A mkdocs plugin that lets you exclude files or trees." -category = "dev" optional = false python-versions = "*" files = [ @@ -1660,39 +1592,33 @@ mkdocs = "*" [[package]] name = "mkdocs-gen-files" -version = "0.4.0" +version = "0.5.0" description = "MkDocs plugin to programmatically generate documentation pages during the build" -category = "dev" optional = false -python-versions = ">=3.7,<4.0" +python-versions = ">=3.7" files = [ - {file = "mkdocs-gen-files-0.4.0.tar.gz", hash = "sha256:377bff8ee8e93515916689f483d971643f83a94eed7e92318854da8f344f0163"}, - {file = "mkdocs_gen_files-0.4.0-py3-none-any.whl", hash = "sha256:3241a4c947ecd11763ca77cc645015305bf71a0e1b9b886801c114fcf9971e71"}, + {file = "mkdocs_gen_files-0.5.0-py3-none-any.whl", hash = "sha256:7ac060096f3f40bd19039e7277dd3050be9a453c8ac578645844d4d91d7978ea"}, + {file = "mkdocs_gen_files-0.5.0.tar.gz", hash = "sha256:4c7cf256b5d67062a788f6b1d035e157fc1a9498c2399be9af5257d4ff4d19bc"}, ] [package.dependencies] -mkdocs = ">=1.0.3,<2.0.0" +mkdocs = ">=1.0.3" [[package]] name = "mkdocs-glightbox" -version = "0.3.2" +version = "0.3.4" description = "MkDocs plugin supports image lightbox with GLightbox." 
-category = "dev" optional = false python-versions = "*" files = [ - {file = "mkdocs-glightbox-0.3.2.tar.gz", hash = "sha256:aec32193fa367f83a5159a10ef0a0e501e595860f4399fb860ca1c5d95fd099d"}, - {file = "mkdocs_glightbox-0.3.2-py3-none-any.whl", hash = "sha256:ef168717e0d6f5e3b33afa8a62837e9ea3b8754fbf073760db169650edbcb866"}, + {file = "mkdocs-glightbox-0.3.4.tar.gz", hash = "sha256:96aaf98216f83c0d0fad2e42a8d805cfa6329d6ab25b54265012ccb2154010d8"}, + {file = "mkdocs_glightbox-0.3.4-py3-none-any.whl", hash = "sha256:8f894435b4f75231164e5d9fb023c01e922e6769e74a121e822c4914f310a41d"}, ] -[package.dependencies] -beautifulsoup4 = ">=4.11.1" - [[package]] name = "mkdocs-jupyter" version = "0.24.1" description = "Use Jupyter in mkdocs websites" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1714,7 +1640,6 @@ test = ["pytest", "pytest-cov"] name = "mkdocs-literate-nav" version = "0.6.0" description = "MkDocs plugin to specify the navigation in Markdown instead of YAML" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1727,14 +1652,13 @@ mkdocs = ">=1.0.3" [[package]] name = "mkdocs-material" -version = "9.1.5" +version = "9.1.17" description = "Documentation that simply works" -category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "mkdocs_material-9.1.5-py3-none-any.whl", hash = "sha256:981e1ef0250e2fbcc23610e9b20e5f242fe1f808079b9bcfbb6c49aa2999343c"}, - {file = "mkdocs_material-9.1.5.tar.gz", hash = "sha256:744519bca52b1e8fe7c2e80e15ed59baf8948111ec763ae6ae629c409bd16d6e"}, + {file = "mkdocs_material-9.1.17-py3-none-any.whl", hash = "sha256:809ed68427fbab0330b0b07bc93175824c3b98f4187060a5c7b46aa8ae398a75"}, + {file = "mkdocs_material-9.1.17.tar.gz", hash = "sha256:5a076524625047bf4ee4da1509ec90626f8fce915839dc07bdae6b59ff4f36f9"}, ] [package.dependencies] @@ -1752,7 +1676,6 @@ requests = ">=2.26" name = "mkdocs-material-extensions" version = "1.1.1" description = "Extension pack for Python Markdown and MkDocs Material." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1764,7 +1687,6 @@ files = [ name = "mkdocs-section-index" version = "0.3.5" description = "MkDocs plugin to allow clickable sections that lead to an index page" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1777,14 +1699,13 @@ mkdocs = ">=1.0.3" [[package]] name = "mkdocstrings" -version = "0.20.0" +version = "0.22.0" description = "Automatic documentation from sources, for MkDocs." -category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "mkdocstrings-0.20.0-py3-none-any.whl", hash = "sha256:f17fc2c4f760ec302b069075ef9e31045aa6372ca91d2f35ded3adba8e25a472"}, - {file = "mkdocstrings-0.20.0.tar.gz", hash = "sha256:c757f4f646d4f939491d6bc9256bfe33e36c5f8026392f49eaa351d241c838e5"}, + {file = "mkdocstrings-0.22.0-py3-none-any.whl", hash = "sha256:2d4095d461554ff6a778fdabdca3c00c468c2f1459d469f7a7f622a2b23212ba"}, + {file = "mkdocstrings-0.22.0.tar.gz", hash = "sha256:82a33b94150ebb3d4b5c73bab4598c3e21468c79ec072eff6931c8f3bfc38256"}, ] [package.dependencies] @@ -1802,25 +1723,23 @@ python-legacy = ["mkdocstrings-python-legacy (>=0.2.1)"] [[package]] name = "mkdocstrings-python" -version = "0.8.3" +version = "1.1.2" description = "A Python handler for mkdocstrings." 
-category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "mkdocstrings-python-0.8.3.tar.gz", hash = "sha256:9ae473f6dc599339b09eee17e4d2b05d6ac0ec29860f3fc9b7512d940fc61adf"}, - {file = "mkdocstrings_python-0.8.3-py3-none-any.whl", hash = "sha256:4e6e1cd6f37a785de0946ced6eb846eb2f5d891ac1cc2c7b832943d3529087a7"}, + {file = "mkdocstrings_python-1.1.2-py3-none-any.whl", hash = "sha256:c2b652a850fec8e85034a9cdb3b45f8ad1a558686edc20ed1f40b4e17e62070f"}, + {file = "mkdocstrings_python-1.1.2.tar.gz", hash = "sha256:f28bdcacb9bcdb44b6942a5642c1ea8b36870614d33e29e3c923e204a8d8ed61"}, ] [package.dependencies] griffe = ">=0.24" -mkdocstrings = ">=0.19" +mkdocstrings = ">=0.20" [[package]] name = "murmurhash" version = "1.0.9" description = "Cython bindings for MurmurHash" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -1858,7 +1777,6 @@ files = [ name = "mypy-extensions" version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." -category = "main" optional = false python-versions = ">=3.5" files = [ @@ -1870,7 +1788,6 @@ files = [ name = "nbclassic" version = "0.5.3" description = "Jupyter Notebook as a Jupyter Server extension." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1906,7 +1823,6 @@ test = ["coverage", "nbval", "pytest", "pytest-cov", "pytest-jupyter", "pytest-p name = "nbclient" version = "0.7.2" description = "A client library for executing notebooks. Formerly nbconvert's ExecutePreprocessor." -category = "dev" optional = false python-versions = ">=3.7.0" files = [ @@ -1916,7 +1832,7 @@ files = [ [package.dependencies] jupyter-client = ">=6.1.12" -jupyter-core = ">=4.12,<5.0.0 || >=5.1.0" +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" nbformat = ">=5.1" traitlets = ">=5.3" @@ -1929,7 +1845,6 @@ test = ["ipykernel", "ipython", "ipywidgets", "nbconvert (>=7.0.0)", "pytest (>= name = "nbconvert" version = "7.2.10" description = "Converting Jupyter Notebooks" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1967,7 +1882,6 @@ webpdf = ["pyppeteer (>=1,<1.1)"] name = "nbformat" version = "5.7.3" description = "The Jupyter Notebook format" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1989,7 +1903,6 @@ test = ["pep440", "pre-commit", "pytest", "testpath"] name = "nest-asyncio" version = "1.5.6" description = "Patch asyncio to allow nested event loops" -category = "dev" optional = false python-versions = ">=3.5" files = [ @@ -2001,7 +1914,6 @@ files = [ name = "notebook" version = "6.5.3" description = "A web-based notebook environment for interactive computing" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -2036,7 +1948,6 @@ test = ["coverage", "nbval", "pytest", "pytest-cov", "requests", "requests-unixs name = "notebook-shim" version = "0.2.2" description = "A shim layer for notebook traits and config" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -2054,7 +1965,6 @@ test = ["pytest", "pytest-console-scripts", "pytest-tornasync"] name = "numpy" version = "1.24.2" description = "Fundamental package for array computing in Python" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -2092,7 +2002,6 @@ files = [ name = "packaging" version = "23.0" description = "Core utilities for Python packages" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -2104,7 +2013,6 @@ files = [ name = "pandocfilters" version = "1.5.0" 
description = "Utilities for writing pandoc filters in python" -category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -2116,7 +2024,6 @@ files = [ name = "parso" version = "0.8.3" description = "A Python Parser" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -2132,7 +2039,6 @@ testing = ["docopt", "pytest (<6.0.0)"] name = "pathspec" version = "0.11.1" description = "Utility library for gitignore style pattern matching of file paths." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -2144,7 +2050,6 @@ files = [ name = "pathy" version = "0.10.1" description = "pathlib.Path subclasses for local and cloud bucket storage" -category = "main" optional = false python-versions = ">= 3.6" files = [ @@ -2167,7 +2072,6 @@ test = ["mock", "pytest", "pytest-coverage", "typer-cli"] name = "pexpect" version = "4.8.0" description = "Pexpect allows easy control of interactive console applications." -category = "dev" optional = false python-versions = "*" files = [ @@ -2182,7 +2086,6 @@ ptyprocess = ">=0.5" name = "pickleshare" version = "0.7.5" description = "Tiny 'shelve'-like database with concurrency support" -category = "dev" optional = false python-versions = "*" files = [ @@ -2194,7 +2097,6 @@ files = [ name = "platformdirs" version = "3.1.1" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -2210,7 +2112,6 @@ test = ["appdirs (==1.4.4)", "covdefaults (>=2.2.2)", "pytest (>=7.2.1)", "pytes name = "pluggy" version = "1.0.0" description = "plugin and hook calling mechanisms for python" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -2226,7 +2127,6 @@ testing = ["pytest", "pytest-benchmark"] name = "preshed" version = "3.0.8" description = "Cython hash table that trusts the keys are pre-hashed" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -2268,7 +2168,6 @@ murmurhash = ">=0.28.0,<1.1.0" name = "prometheus-client" version = "0.16.0" description = "Python client for the Prometheus monitoring system." -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -2283,7 +2182,6 @@ twisted = ["twisted"] name = "prompt-toolkit" version = "3.0.38" description = "Library for building powerful interactive command lines in Python" -category = "dev" optional = false python-versions = ">=3.7.0" files = [ @@ -2298,7 +2196,6 @@ wcwidth = "*" name = "psutil" version = "5.9.4" description = "Cross-platform lib for process and system monitoring in Python." 
-category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -2325,7 +2222,6 @@ test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] name = "ptyprocess" version = "0.7.0" description = "Run a subprocess in a pseudo terminal" -category = "dev" optional = false python-versions = "*" files = [ @@ -2337,7 +2233,6 @@ files = [ name = "pure-eval" version = "0.2.2" description = "Safely evaluate AST nodes without side effects" -category = "dev" optional = false python-versions = "*" files = [ @@ -2352,7 +2247,6 @@ tests = ["pytest"] name = "pycparser" version = "2.21" description = "C parser in Python" -category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -2364,7 +2258,6 @@ files = [ name = "pydantic" version = "1.10.6" description = "Data validation and settings management using python type hints" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -2417,7 +2310,6 @@ email = ["email-validator (>=1.0.3)"] name = "pygments" version = "2.14.0" description = "Pygments is a syntax highlighting package written in Python." -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -2430,14 +2322,13 @@ plugins = ["importlib-metadata"] [[package]] name = "pymdown-extensions" -version = "9.10" +version = "10.0" description = "Extension pack for Python Markdown." -category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "pymdown_extensions-9.10-py3-none-any.whl", hash = "sha256:31eaa76ce6f96aabfcea98787c2fff2c5c0611b20a53a94213970cfbf05f02b8"}, - {file = "pymdown_extensions-9.10.tar.gz", hash = "sha256:562c38eee4ce3f101ce631b804bfc2177a8a76c7e4dc908871fb6741a90257a7"}, + {file = "pymdown_extensions-10.0-py3-none-any.whl", hash = "sha256:e6cbe8ace7d8feda30bc4fd6a21a073893a9a0e90c373e92d69ce5b653051f55"}, + {file = "pymdown_extensions-10.0.tar.gz", hash = "sha256:9a77955e63528c2ee98073a1fb3207c1a45607bc74a34ef21acd098f46c3aa8a"}, ] [package.dependencies] @@ -2448,7 +2339,6 @@ pyyaml = "*" name = "pyrsistent" version = "0.19.3" description = "Persistent/Functional/Immutable data structures" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -2483,18 +2373,16 @@ files = [ [[package]] name = "pytest" -version = "7.2.2" +version = "7.4.0" description = "pytest: simple powerful testing with Python" -category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "pytest-7.2.2-py3-none-any.whl", hash = "sha256:130328f552dcfac0b1cec75c12e3f005619dc5f874f0a06e8ff7263f0ee6225e"}, - {file = "pytest-7.2.2.tar.gz", hash = "sha256:c99ab0c73aceb050f68929bc93af19ab6db0558791c6a0715723abe9d0ade9d4"}, + {file = "pytest-7.4.0-py3-none-any.whl", hash = "sha256:78bf16451a2eb8c7a2ea98e32dc119fd2aa758f1d5d66dbf0a59d69a3969df32"}, + {file = "pytest-7.4.0.tar.gz", hash = "sha256:b4bf8c45bd59934ed84001ad51e11b4ee40d40a1229d2c79f9c592b0a3f6bd8a"}, ] [package.dependencies] -attrs = ">=19.2.0" colorama = {version = "*", markers = "sys_platform == \"win32\""} exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} iniconfig = "*" @@ -2503,18 +2391,17 @@ pluggy = ">=0.12,<2.0" tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} [package.extras] -testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "xmlschema"] +testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", 
"xmlschema"] [[package]] name = "pytest-cov" -version = "4.0.0" +version = "4.1.0" description = "Pytest plugin for measuring coverage." -category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" files = [ - {file = "pytest-cov-4.0.0.tar.gz", hash = "sha256:996b79efde6433cdbd0088872dbc5fb3ed7fe1578b68cdbba634f14bb8dd0470"}, - {file = "pytest_cov-4.0.0-py3-none-any.whl", hash = "sha256:2feb1b751d66a8bd934e5edfa2e961d11309dc37b73b0eabe73b5945fee20f6b"}, + {file = "pytest-cov-4.1.0.tar.gz", hash = "sha256:3904b13dfbfec47f003b8e77fd5b589cd11904a21ddf1ab38a64f204d6a10ef6"}, + {file = "pytest_cov-4.1.0-py3-none-any.whl", hash = "sha256:6ba70b9e97e69fcc3fb45bfeab2d0a138fb65c4d0d6a41ef33983ad114be8c3a"}, ] [package.dependencies] @@ -2528,7 +2415,6 @@ testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtuale name = "python-dateutil" version = "2.8.2" description = "Extensions to the standard Python datetime module" -category = "dev" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" files = [ @@ -2543,7 +2429,6 @@ six = ">=1.5" name = "python-json-logger" version = "2.0.7" description = "A python library adding a json log formatter" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -2555,7 +2440,6 @@ files = [ name = "pywin32" version = "305" description = "Python for Window Extensions" -category = "dev" optional = false python-versions = "*" files = [ @@ -2579,7 +2463,6 @@ files = [ name = "pywinpty" version = "2.0.10" description = "Pseudo terminal support for Windows from Python." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -2595,7 +2478,6 @@ files = [ name = "pyyaml" version = "6.0" description = "YAML parser and emitter for Python" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -2645,7 +2527,6 @@ files = [ name = "pyyaml-env-tag" version = "0.1" description = "A custom YAML tag for referencing environment variables in YAML files. " -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -2660,7 +2541,6 @@ pyyaml = "*" name = "pyzmq" version = "25.0.1" description = "Python bindings for 0MQ" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -2750,7 +2630,6 @@ cffi = {version = "*", markers = "implementation_name == \"pypy\""} name = "qtconsole" version = "5.4.1" description = "Jupyter Qt console" -category = "dev" optional = false python-versions = ">= 3.7" files = [ @@ -2777,7 +2656,6 @@ test = ["flaky", "pytest", "pytest-qt"] name = "qtpy" version = "2.3.0" description = "Provides an abstraction layer on top of the various Qt bindings (PyQt5/6 and PySide2/6)." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -2795,7 +2673,6 @@ test = ["pytest (>=6,!=7.0.0,!=7.0.1)", "pytest-cov (>=3.0.0)", "pytest-qt"] name = "rapidfuzz" version = "2.13.7" description = "rapid fuzzy string matching" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -2897,7 +2774,6 @@ full = ["numpy"] name = "regex" version = "2022.10.31" description = "Alternative regular expression module, to replace re." -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -2993,21 +2869,20 @@ files = [ [[package]] name = "requests" -version = "2.28.2" +version = "2.31.0" description = "Python HTTP for Humans." 
-category = "main" optional = false -python-versions = ">=3.7, <4" +python-versions = ">=3.7" files = [ - {file = "requests-2.28.2-py3-none-any.whl", hash = "sha256:64299f4909223da747622c030b781c0d7811e359c37124b4bd368fb8c6518baa"}, - {file = "requests-2.28.2.tar.gz", hash = "sha256:98b1b2782e3c6c4904938b84c0eb932721069dfdb9134313beff7c83c2df24bf"}, + {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, + {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, ] [package.dependencies] certifi = ">=2017.4.17" charset-normalizer = ">=2,<4" idna = ">=2.5,<4" -urllib3 = ">=1.21.1,<1.27" +urllib3 = ">=1.21.1,<3" [package.extras] socks = ["PySocks (>=1.5.6,!=1.5.7)"] @@ -3017,7 +2892,6 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] name = "rfc3339-validator" version = "0.1.4" description = "A pure python RFC3339 validator" -category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" files = [ @@ -3032,7 +2906,6 @@ six = "*" name = "rfc3986-validator" version = "0.1.1" description = "Pure python rfc3986 validator" -category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" files = [ @@ -3042,48 +2915,44 @@ files = [ [[package]] name = "scipy" -version = "1.10.1" +version = "1.11.1" description = "Fundamental algorithms for scientific computing in Python" -category = "main" -optional = false -python-versions = "<3.12,>=3.8" -files = [ - {file = "scipy-1.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e7354fd7527a4b0377ce55f286805b34e8c54b91be865bac273f527e1b839019"}, - {file = "scipy-1.10.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:4b3f429188c66603a1a5c549fb414e4d3bdc2a24792e061ffbd607d3d75fd84e"}, - {file = "scipy-1.10.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1553b5dcddd64ba9a0d95355e63fe6c3fc303a8fd77c7bc91e77d61363f7433f"}, - {file = "scipy-1.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c0ff64b06b10e35215abce517252b375e580a6125fd5fdf6421b98efbefb2d2"}, - {file = "scipy-1.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:fae8a7b898c42dffe3f7361c40d5952b6bf32d10c4569098d276b4c547905ee1"}, - {file = "scipy-1.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0f1564ea217e82c1bbe75ddf7285ba0709ecd503f048cb1236ae9995f64217bd"}, - {file = "scipy-1.10.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:d925fa1c81b772882aa55bcc10bf88324dadb66ff85d548c71515f6689c6dac5"}, - {file = "scipy-1.10.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaea0a6be54462ec027de54fca511540980d1e9eea68b2d5c1dbfe084797be35"}, - {file = "scipy-1.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15a35c4242ec5f292c3dd364a7c71a61be87a3d4ddcc693372813c0b73c9af1d"}, - {file = "scipy-1.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:43b8e0bcb877faf0abfb613d51026cd5cc78918e9530e375727bf0625c82788f"}, - {file = "scipy-1.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5678f88c68ea866ed9ebe3a989091088553ba12c6090244fdae3e467b1139c35"}, - {file = "scipy-1.10.1-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:39becb03541f9e58243f4197584286e339029e8908c46f7221abeea4b749fa88"}, - {file = "scipy-1.10.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bce5869c8d68cf383ce240e44c1d9ae7c06078a9396df68ce88a1230f93a30c1"}, - 
{file = "scipy-1.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07c3457ce0b3ad5124f98a86533106b643dd811dd61b548e78cf4c8786652f6f"}, - {file = "scipy-1.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:049a8bbf0ad95277ffba9b3b7d23e5369cc39e66406d60422c8cfef40ccc8415"}, - {file = "scipy-1.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cd9f1027ff30d90618914a64ca9b1a77a431159df0e2a195d8a9e8a04c78abf9"}, - {file = "scipy-1.10.1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:79c8e5a6c6ffaf3a2262ef1be1e108a035cf4f05c14df56057b64acc5bebffb6"}, - {file = "scipy-1.10.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51af417a000d2dbe1ec6c372dfe688e041a7084da4fdd350aeb139bd3fb55353"}, - {file = "scipy-1.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b4735d6c28aad3cdcf52117e0e91d6b39acd4272f3f5cd9907c24ee931ad601"}, - {file = "scipy-1.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:7ff7f37b1bf4417baca958d254e8e2875d0cc23aaadbe65b3d5b3077b0eb23ea"}, - {file = "scipy-1.10.1.tar.gz", hash = "sha256:2cf9dfb80a7b4589ba4c40ce7588986d6d5cebc5457cad2c2880f6bc2d42f3a5"}, -] - -[package.dependencies] -numpy = ">=1.19.5,<1.27.0" - -[package.extras] -dev = ["click", "doit (>=0.36.0)", "flake8", "mypy", "pycodestyle", "pydevtool", "rich-click", "typing_extensions"] -doc = ["matplotlib (>2)", "numpydoc", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-design (>=0.2.0)"] +optional = false +python-versions = "<3.13,>=3.9" +files = [ + {file = "scipy-1.11.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:aec8c62fbe52914f9cf28d846cf0401dd80ab80788bbab909434eb336ed07c04"}, + {file = "scipy-1.11.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:3b9963798df1d8a52db41a6fc0e6fa65b1c60e85d73da27ae8bb754de4792481"}, + {file = "scipy-1.11.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e8eb42db36526b130dfbc417609498a6192381abc1975b91e3eb238e0b41c1a"}, + {file = "scipy-1.11.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:366a6a937110d80dca4f63b3f5b00cc89d36f678b2d124a01067b154e692bab1"}, + {file = "scipy-1.11.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:08d957ca82d3535b3b9ba6c8ff355d78fe975271874e2af267cb5add5bd78625"}, + {file = "scipy-1.11.1-cp310-cp310-win_amd64.whl", hash = "sha256:e866514bc2d660608447b6ba95c8900d591f2865c07cca0aa4f7ff3c4ca70f30"}, + {file = "scipy-1.11.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ba94eeef3c9caa4cea7b402a35bb02a5714ee1ee77eb98aca1eed4543beb0f4c"}, + {file = "scipy-1.11.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:512fdc18c65f76dadaca139348e525646d440220d8d05f6d21965b8d4466bccd"}, + {file = "scipy-1.11.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cce154372f0ebe88556ed06d7b196e9c2e0c13080ecb58d0f35062dc7cc28b47"}, + {file = "scipy-1.11.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4bb943010203465ac81efa392e4645265077b4d9e99b66cf3ed33ae12254173"}, + {file = "scipy-1.11.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:249cfa465c379c9bb2c20123001e151ff5e29b351cbb7f9c91587260602c58d0"}, + {file = "scipy-1.11.1-cp311-cp311-win_amd64.whl", hash = "sha256:ffb28e3fa31b9c376d0fb1f74c1f13911c8c154a760312fbee87a21eb21efe31"}, + {file = "scipy-1.11.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:39154437654260a52871dfde852adf1b93b1d1bc5dc0ffa70068f16ec0be2624"}, + {file = "scipy-1.11.1-cp39-cp39-macosx_12_0_arm64.whl", hash 
= "sha256:b588311875c58d1acd4ef17c983b9f1ab5391755a47c3d70b6bd503a45bfaf71"}, + {file = "scipy-1.11.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d51565560565a0307ed06fa0ec4c6f21ff094947d4844d6068ed04400c72d0c3"}, + {file = "scipy-1.11.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b41a0f322b4eb51b078cb3441e950ad661ede490c3aca66edef66f4b37ab1877"}, + {file = "scipy-1.11.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:396fae3f8c12ad14c5f3eb40499fd06a6fef8393a6baa352a652ecd51e74e029"}, + {file = "scipy-1.11.1-cp39-cp39-win_amd64.whl", hash = "sha256:be8c962a821957fdde8c4044efdab7a140c13294997a407eaee777acf63cbf0c"}, + {file = "scipy-1.11.1.tar.gz", hash = "sha256:fb5b492fa035334fd249f0973cc79ecad8b09c604b42a127a677b45a9a3d4289"}, +] + +[package.dependencies] +numpy = ">=1.21.6,<1.28.0" + +[package.extras] +dev = ["click", "cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy", "pycodestyle", "pydevtool", "rich-click", "ruff", "types-psutil", "typing_extensions"] +doc = ["jupytext", "matplotlib (>2)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-design (>=0.2.0)"] test = ["asv", "gmpy2", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] [[package]] name = "send2trash" version = "1.8.0" description = "Send file to trash natively under Mac OS X, Windows and Linux." -category = "dev" optional = false python-versions = "*" files = [ @@ -3100,7 +2969,6 @@ win32 = ["pywin32"] name = "setuptools" version = "67.6.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -3117,7 +2985,6 @@ testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs ( name = "six" version = "1.16.0" description = "Python 2 and 3 compatibility utilities" -category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" files = [ @@ -3129,7 +2996,6 @@ files = [ name = "smart-open" version = "6.3.0" description = "Utils for streaming large files (S3, HDFS, GCS, Azure Blob Storage, gzip, bz2...)" -category = "main" optional = false python-versions = ">=3.6,<4.0" files = [ @@ -3151,7 +3017,6 @@ webhdfs = ["requests"] name = "sniffio" version = "1.3.0" description = "Sniff out which async library your code is running under" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -3163,7 +3028,6 @@ files = [ name = "soupsieve" version = "2.4" description = "A modern CSS selector implementation for Beautiful Soup." 
-category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -3173,40 +3037,39 @@ files = [ [[package]] name = "spacy" -version = "3.5.1" +version = "3.5.4" description = "Industrial-strength Natural Language Processing (NLP) in Python" -category = "main" optional = false python-versions = ">=3.6" files = [ - {file = "spacy-3.5.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:428925faf26a7c7a9564431d7a505af7816b22b5c68b240bbe073ae928e9ef36"}, - {file = "spacy-3.5.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:63bc19f4b5fa5f806698e7d16828cacbfefd0ab44f770e0b2a1a0509dd07f6f9"}, - {file = "spacy-3.5.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f5a1073cc7bb9896624682f6a5ab29c2d3d2d935cb36f88b25cbb01f12b57ef"}, - {file = "spacy-3.5.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb9af95d1c06e23e89731d61f3fa5f28583684e10bd3d29d9e7bb161ffe02df9"}, - {file = "spacy-3.5.1-cp310-cp310-win_amd64.whl", hash = "sha256:dec30afd4916cb4f02449ccec94e2f8a3eb929686e9f96bd74f51f4c07d75577"}, - {file = "spacy-3.5.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9d2e256c44241b9a2ac3204659891d332d370dfa0e39917254574bc1ffdfb079"}, - {file = "spacy-3.5.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d337054213f837ae295431a35638bb469c4e4796f6c5ff17d2dd18d545615a0e"}, - {file = "spacy-3.5.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ab9bbd8e34bfabd506f74d2739c6a4e47c899fd7d3f1648bbffde0c16b8a339d"}, - {file = "spacy-3.5.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5ab0e2b406b3953c5975adcc4ac09bdc8fbcb20dd9a2a8ea2774b4d83106c24"}, - {file = "spacy-3.5.1-cp311-cp311-win_amd64.whl", hash = "sha256:9cbec19e55fcdb6e4be220c6b6335af96c374a7ac76dffb15f9da95c9d39ce62"}, - {file = "spacy-3.5.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f92b590c1c50eb421b6aaa0373b37fbdfb290a130771728e8d06159517cc120d"}, - {file = "spacy-3.5.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2525bc1ec9e784597365daf245f65b9ca9fd8a25fa96f9c7a6b7bfd5048b87bc"}, - {file = "spacy-3.5.1-cp36-cp36m-win_amd64.whl", hash = "sha256:e3f113cbf4331052622ec5c27e581751beba5c62e9af2f21d2798db50a41e04c"}, - {file = "spacy-3.5.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0c9e93851a210ccc59112243fc74dcac82191383e7654731c2842326f7d1eb1d"}, - {file = "spacy-3.5.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ca30de7f82ab97e054a457eeb424060091b609114ebf7c90ef1775cac40fe04"}, - {file = "spacy-3.5.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3083ccbcc52102bf53ff797233ea90a7d2b01c3853d811272ebc63de0aff4df5"}, - {file = "spacy-3.5.1-cp37-cp37m-win_amd64.whl", hash = "sha256:1e795b3f85f229ea54ff7f91e15fb5d7afacec5e5fca302dca1bc3224547e4f0"}, - {file = "spacy-3.5.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:fa47b47142883891252dda54da7a79055cb4e703914a90928c2fbe5bd058f4ed"}, - {file = "spacy-3.5.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d92387989fe9c3bebd60faaeb590206e34ca9c421a52460a058ee5050d9fc8c6"}, - {file = "spacy-3.5.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1067f7ef0e87341cea2c3187f9b96965f4b0c076a87e22c1aac45ea5586f856"}, - {file = "spacy-3.5.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ab781021e896aae4a0f9f0a5424c75fc5d6ef4c20f56fd115e8605484567fd6"}, - {file = 
"spacy-3.5.1-cp38-cp38-win_amd64.whl", hash = "sha256:c43b2597649549e84ceda7b658479e28c6e66995ebd9a61e0193b0c0dceffe50"}, - {file = "spacy-3.5.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9a389f850ab1a3f17e6beb90fd92533bad21a372979496b01a99ae1a9f3e96e3"}, - {file = "spacy-3.5.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:af8ca295e8381a0273b6543c1389275af98878a43ab70c781630277e49ce978f"}, - {file = "spacy-3.5.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a62a458c88c296234471fe540fe5d1ec763701d2f556870512143de8559286c0"}, - {file = "spacy-3.5.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:04ad29a306d1879cafe23e4e8a613046f62d81ceeb70e6fcab3fddb4b0fedf7f"}, - {file = "spacy-3.5.1-cp39-cp39-win_amd64.whl", hash = "sha256:c4be3508c9b4109afe3e5c7fdf91b9d7153ec2227f24270625caee96651fa9e2"}, - {file = "spacy-3.5.1.tar.gz", hash = "sha256:811ae1468c58b97fc9aa31187d6b55317784258f0a47ebf69d81cab639e3fa15"}, + {file = "spacy-3.5.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39209f73508027a99ddf2a615ae99ceb6db84f9f10c0050c7dc0c78cd8d662e9"}, + {file = "spacy-3.5.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:abc2e347fa2217c97c602a591cd4202f3bea546e3beafe2b92dd4d2984b68299"}, + {file = "spacy-3.5.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d97294c588fcd05d0c644303dd54c8aa437bfd895b1c5e57f51ac0af8304181"}, + {file = "spacy-3.5.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e7992c6424fd28187064ee32c98998db6194d65e017e958993dd16f6953c1c1"}, + {file = "spacy-3.5.4-cp310-cp310-win_amd64.whl", hash = "sha256:64cac9da114a2b98794a40e20ff2f8547dec01d44660c8d0dd64b2a5b32bf929"}, + {file = "spacy-3.5.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2796778a91f2d690864124a98f2fa4d3a82db6585244137d9283b4fbce21ef89"}, + {file = "spacy-3.5.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:97aea4aceb7d8a5a4183bad59957d6154d95e80d0b8a25690305fe5d4a8b8cb6"}, + {file = "spacy-3.5.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2aeb5f25ffb469c7c1f93a730c8810efe69ce65bb60318ae0e65b5106108df0c"}, + {file = "spacy-3.5.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b0f7166d8f20c6332d0ed89a1bc32b3030f223c178cc26597b094190c853a7ed"}, + {file = "spacy-3.5.4-cp311-cp311-win_amd64.whl", hash = "sha256:35dec614492c849f6c6b29dc0a424502dc193f6775d4f55573ad7d8f55e06561"}, + {file = "spacy-3.5.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0240874ed34d9e00df68cdbc3f1ca3741232233dc1194f24c18f73ae7dac7644"}, + {file = "spacy-3.5.4-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d1eb72163c8e8cb070bdafcfb8fb3c88f50a5b688500e8ef788fb4fb79e9997"}, + {file = "spacy-3.5.4-cp36-cp36m-win_amd64.whl", hash = "sha256:a4c7ba041aaffc9ecd0a3f9dff86f392939045221315f52e3044fe1453fc5d48"}, + {file = "spacy-3.5.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:61ab38c6732be402063f55b8b004b451b17dd20ccad966ab3abce9738e3859e4"}, + {file = "spacy-3.5.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b49807f1c47430f02365e7b0f25d2bddaaa917430e3dc3fbf0d60e0bffd5a06e"}, + {file = "spacy-3.5.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b59bdd41b372c52b639c6bb3b2e4d37cc5e6175b1d187f25c33a6b56c1d3d08c"}, + {file = "spacy-3.5.4-cp37-cp37m-win_amd64.whl", hash = 
"sha256:ab802c2e06ba14556ea4c160309a8369fad4bd847895e341e8b0bfe7c0e1bfcf"}, + {file = "spacy-3.5.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:406d09abc7c061ce1f461311557495608e25be5fc405f6a840e14a9a044f84bd"}, + {file = "spacy-3.5.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0e9e0f9d95c6fbdc25f38e6d3bdad7d85723bcc8854333cc5f906d9a4db2b76a"}, + {file = "spacy-3.5.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1476db25cff811a43a19b79d12ce5b2a38dcbdc378fb9923f66aeb31c7f528c8"}, + {file = "spacy-3.5.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fff8986c3b9aa9b5a99a1ad57e842985f71b450102d1e102d4ac951f595688c"}, + {file = "spacy-3.5.4-cp38-cp38-win_amd64.whl", hash = "sha256:d9b0d87f50a8e7592da2a7480956abd418ac143327b1c56244eca3c226c7332e"}, + {file = "spacy-3.5.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:abf05e7f64c9136602ec7cec54ff616c79dd89634ded5575587c619da9367db9"}, + {file = "spacy-3.5.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c270d2b37e6896b7959d493e56ed4d37146d7eec732253c91f07379685c08dd6"}, + {file = "spacy-3.5.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:af50c9838bf2ffa80397fb20f02127b0b66f1b26dcdcee86185292199c803041"}, + {file = "spacy-3.5.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed28a237c57f95a36b891d3b60773b8efb81f6c470f48fea7e4ec71adb8b85a5"}, + {file = "spacy-3.5.4-cp39-cp39-win_amd64.whl", hash = "sha256:ad83768225e0ab2ee259ff5c1c759adb5c76649fb343ebd3bd777a3ec3742004"}, + {file = "spacy-3.5.4.tar.gz", hash = "sha256:9a9c167e9dcebfefacc75dac34a8e72becbe348eb45bbf06a6c0523ae05ac425"}, ] [package.dependencies] @@ -3228,29 +3091,29 @@ spacy-loggers = ">=1.0.0,<2.0.0" srsly = ">=2.4.3,<3.0.0" thinc = ">=8.1.8,<8.2.0" tqdm = ">=4.38.0,<5.0.0" -typer = ">=0.3.0,<0.8.0" +typer = ">=0.3.0,<0.10.0" wasabi = ">=0.9.1,<1.2.0" [package.extras] apple = ["thinc-apple-ops (>=0.1.0.dev0,<1.0.0)"] -cuda = ["cupy (>=5.0.0b4,<12.0.0)"] -cuda-autodetect = ["cupy-wheel (>=11.0.0,<12.0.0)"] -cuda100 = ["cupy-cuda100 (>=5.0.0b4,<12.0.0)"] -cuda101 = ["cupy-cuda101 (>=5.0.0b4,<12.0.0)"] -cuda102 = ["cupy-cuda102 (>=5.0.0b4,<12.0.0)"] -cuda110 = ["cupy-cuda110 (>=5.0.0b4,<12.0.0)"] -cuda111 = ["cupy-cuda111 (>=5.0.0b4,<12.0.0)"] -cuda112 = ["cupy-cuda112 (>=5.0.0b4,<12.0.0)"] -cuda113 = ["cupy-cuda113 (>=5.0.0b4,<12.0.0)"] -cuda114 = ["cupy-cuda114 (>=5.0.0b4,<12.0.0)"] -cuda115 = ["cupy-cuda115 (>=5.0.0b4,<12.0.0)"] -cuda116 = ["cupy-cuda116 (>=5.0.0b4,<12.0.0)"] -cuda117 = ["cupy-cuda117 (>=5.0.0b4,<12.0.0)"] -cuda11x = ["cupy-cuda11x (>=11.0.0,<12.0.0)"] -cuda80 = ["cupy-cuda80 (>=5.0.0b4,<12.0.0)"] -cuda90 = ["cupy-cuda90 (>=5.0.0b4,<12.0.0)"] -cuda91 = ["cupy-cuda91 (>=5.0.0b4,<12.0.0)"] -cuda92 = ["cupy-cuda92 (>=5.0.0b4,<12.0.0)"] +cuda = ["cupy (>=5.0.0b4,<13.0.0)"] +cuda-autodetect = ["cupy-wheel (>=11.0.0,<13.0.0)"] +cuda100 = ["cupy-cuda100 (>=5.0.0b4,<13.0.0)"] +cuda101 = ["cupy-cuda101 (>=5.0.0b4,<13.0.0)"] +cuda102 = ["cupy-cuda102 (>=5.0.0b4,<13.0.0)"] +cuda110 = ["cupy-cuda110 (>=5.0.0b4,<13.0.0)"] +cuda111 = ["cupy-cuda111 (>=5.0.0b4,<13.0.0)"] +cuda112 = ["cupy-cuda112 (>=5.0.0b4,<13.0.0)"] +cuda113 = ["cupy-cuda113 (>=5.0.0b4,<13.0.0)"] +cuda114 = ["cupy-cuda114 (>=5.0.0b4,<13.0.0)"] +cuda115 = ["cupy-cuda115 (>=5.0.0b4,<13.0.0)"] +cuda116 = ["cupy-cuda116 (>=5.0.0b4,<13.0.0)"] +cuda117 = ["cupy-cuda117 (>=5.0.0b4,<13.0.0)"] +cuda11x = ["cupy-cuda11x (>=11.0.0,<13.0.0)"] +cuda80 = ["cupy-cuda80 (>=5.0.0b4,<13.0.0)"] +cuda90 = 
["cupy-cuda90 (>=5.0.0b4,<13.0.0)"] +cuda91 = ["cupy-cuda91 (>=5.0.0b4,<13.0.0)"] +cuda92 = ["cupy-cuda92 (>=5.0.0b4,<13.0.0)"] ja = ["sudachidict-core (>=20211220)", "sudachipy (>=0.5.2,!=0.6.1)"] ko = ["natto-py (>=0.9.0)"] lookups = ["spacy-lookups-data (>=1.0.3,<1.1.0)"] @@ -3262,7 +3125,6 @@ transformers = ["spacy-transformers (>=1.1.2,<1.3.0)"] name = "spacy-legacy" version = "3.0.12" description = "Legacy registered functions for spaCy backwards compatibility" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -3274,7 +3136,6 @@ files = [ name = "spacy-loggers" version = "1.0.4" description = "Logging utilities for SpaCy" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -3286,7 +3147,6 @@ files = [ name = "srsly" version = "2.4.6" description = "Modern high-performance serialization utilities for Python" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -3327,7 +3187,6 @@ catalogue = ">=2.0.3,<2.1.0" name = "stack-data" version = "0.6.2" description = "Extract data from python stack frames and tracebacks for informative displays" -category = "dev" optional = false python-versions = "*" files = [ @@ -3347,7 +3206,6 @@ tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"] name = "terminado" version = "0.17.1" description = "Tornado websocket backend for the Xterm.js Javascript terminal emulator library." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -3368,7 +3226,6 @@ test = ["pre-commit", "pytest (>=7.0)", "pytest-timeout"] name = "thinc" version = "8.1.9" description = "A refreshing functional take on deep learning, compatible with your favorite libraries" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -3444,7 +3301,6 @@ torch = ["torch (>=1.6.0)"] name = "tinycss2" version = "1.2.1" description = "A tiny CSS parser" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -3463,7 +3319,6 @@ test = ["flake8", "isort", "pytest"] name = "toml" version = "0.10.2" description = "Python Library for Tom's Obvious, Minimal Language" -category = "dev" optional = false python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" files = [ @@ -3475,7 +3330,6 @@ files = [ name = "tomli" version = "2.0.1" description = "A lil' TOML parser" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -3485,30 +3339,28 @@ files = [ [[package]] name = "tornado" -version = "6.2" +version = "6.3.2" description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." 
-category = "dev" optional = false -python-versions = ">= 3.7" +python-versions = ">= 3.8" files = [ - {file = "tornado-6.2-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:20f638fd8cc85f3cbae3c732326e96addff0a15e22d80f049e00121651e82e72"}, - {file = "tornado-6.2-cp37-abi3-macosx_10_9_x86_64.whl", hash = "sha256:87dcafae3e884462f90c90ecc200defe5e580a7fbbb4365eda7c7c1eb809ebc9"}, - {file = "tornado-6.2-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba09ef14ca9893954244fd872798b4ccb2367c165946ce2dd7376aebdde8e3ac"}, - {file = "tornado-6.2-cp37-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b8150f721c101abdef99073bf66d3903e292d851bee51910839831caba341a75"}, - {file = "tornado-6.2-cp37-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3a2f5999215a3a06a4fc218026cd84c61b8b2b40ac5296a6db1f1451ef04c1e"}, - {file = "tornado-6.2-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:5f8c52d219d4995388119af7ccaa0bcec289535747620116a58d830e7c25d8a8"}, - {file = "tornado-6.2-cp37-abi3-musllinux_1_1_i686.whl", hash = "sha256:6fdfabffd8dfcb6cf887428849d30cf19a3ea34c2c248461e1f7d718ad30b66b"}, - {file = "tornado-6.2-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:1d54d13ab8414ed44de07efecb97d4ef7c39f7438cf5e976ccd356bebb1b5fca"}, - {file = "tornado-6.2-cp37-abi3-win32.whl", hash = "sha256:5c87076709343557ef8032934ce5f637dbb552efa7b21d08e89ae7619ed0eb23"}, - {file = "tornado-6.2-cp37-abi3-win_amd64.whl", hash = "sha256:e5f923aa6a47e133d1cf87d60700889d7eae68988704e20c75fb2d65677a8e4b"}, - {file = "tornado-6.2.tar.gz", hash = "sha256:9b630419bde84ec666bfd7ea0a4cb2a8a651c2d5cccdbdd1972a0c859dfc3c13"}, + {file = "tornado-6.3.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:c367ab6c0393d71171123ca5515c61ff62fe09024fa6bf299cd1339dc9456829"}, + {file = "tornado-6.3.2-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:b46a6ab20f5c7c1cb949c72c1994a4585d2eaa0be4853f50a03b5031e964fc7c"}, + {file = "tornado-6.3.2-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c2de14066c4a38b4ecbbcd55c5cc4b5340eb04f1c5e81da7451ef555859c833f"}, + {file = "tornado-6.3.2-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:05615096845cf50a895026f749195bf0b10b8909f9be672f50b0fe69cba368e4"}, + {file = "tornado-6.3.2-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5b17b1cf5f8354efa3d37c6e28fdfd9c1c1e5122f2cb56dac121ac61baa47cbe"}, + {file = "tornado-6.3.2-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:29e71c847a35f6e10ca3b5c2990a52ce38b233019d8e858b755ea6ce4dcdd19d"}, + {file = "tornado-6.3.2-cp38-abi3-musllinux_1_1_i686.whl", hash = "sha256:834ae7540ad3a83199a8da8f9f2d383e3c3d5130a328889e4cc991acc81e87a0"}, + {file = "tornado-6.3.2-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:6a0848f1aea0d196a7c4f6772197cbe2abc4266f836b0aac76947872cd29b411"}, + {file = "tornado-6.3.2-cp38-abi3-win32.whl", hash = "sha256:7efcbcc30b7c654eb6a8c9c9da787a851c18f8ccd4a5a3a95b05c7accfa068d2"}, + {file = "tornado-6.3.2-cp38-abi3-win_amd64.whl", hash = "sha256:0c325e66c8123c606eea33084976c832aa4e766b7dff8aedd7587ea44a604cdf"}, + {file = "tornado-6.3.2.tar.gz", hash = "sha256:4b927c4f19b71e627b13f3db2324e4ae660527143f9e1f2e2fb404f3a187e2ba"}, ] [[package]] name = "tqdm" version = "4.65.0" description = "Fast, Extensible Progress Meter" -category = "main" 
optional = false python-versions = ">=3.7" files = [ @@ -3529,7 +3381,6 @@ telegram = ["requests"] name = "traitlets" version = "5.9.0" description = "Traitlets Python configuration system" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -3545,7 +3396,6 @@ test = ["argcomplete (>=2.0)", "pre-commit", "pytest", "pytest-mock"] name = "typer" version = "0.7.0" description = "Typer, build great CLIs. Easy to code. Based on Python type hints." -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -3566,7 +3416,6 @@ test = ["black (>=22.3.0,<23.0.0)", "coverage (>=6.2,<7.0)", "isort (>=5.0.6,<6. name = "typing-extensions" version = "4.5.0" description = "Backported and Experimental Type Hints for Python 3.7+" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -3578,7 +3427,6 @@ files = [ name = "uri-template" version = "1.2.0" description = "RFC 6570 URI Template Processor" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -3593,7 +3441,6 @@ dev = ["flake8 (<4.0.0)", "flake8-annotations", "flake8-bugbear", "flake8-commas name = "urllib3" version = "1.26.15" description = "HTTP library with thread-safe connection pooling, file post, and more." -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" files = [ @@ -3610,7 +3457,6 @@ socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] name = "wasabi" version = "1.1.1" description = "A lightweight console printing and formatting toolkit" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -3625,7 +3471,6 @@ colorama = {version = ">=0.4.6", markers = "sys_platform == \"win32\" and python name = "watchdog" version = "2.3.1" description = "Filesystem events monitoring" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -3666,7 +3511,6 @@ watchmedo = ["PyYAML (>=3.10)"] name = "wcwidth" version = "0.2.6" description = "Measures the displayed width of unicode strings in a terminal" -category = "dev" optional = false python-versions = "*" files = [ @@ -3678,7 +3522,6 @@ files = [ name = "webcolors" version = "1.12" description = "A library for working with color names and color values formats defined by HTML and CSS." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -3690,7 +3533,6 @@ files = [ name = "webencodings" version = "0.5.1" description = "Character encoding aliases for legacy web content" -category = "dev" optional = false python-versions = "*" files = [ @@ -3702,7 +3544,6 @@ files = [ name = "websocket-client" version = "1.5.1" description = "WebSocket client for Python with low level API options" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -3719,7 +3560,6 @@ test = ["websockets"] name = "widgetsnbextension" version = "4.0.5" description = "Jupyter interactive widgets for Jupyter Notebook" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -3731,7 +3571,6 @@ files = [ name = "wrapt" version = "1.15.0" description = "Module for decorators, wrappers and monkey patching." 
-category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" files = [ @@ -3815,4 +3654,4 @@ files = [ [metadata] lock-version = "2.0" python-versions = "^3.10,<3.12" -content-hash = "0deb71d5a186f72860c466ec53aa3d909d25984b6f3b9a26678807e0775d45de" +content-hash = "3db2a1bc7b1807318d9cb22383b1953aed1c15fad2002b3befcf52f34a1eb25f" diff --git a/pyproject.toml b/pyproject.toml index 672dcb2f..122aca81 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,7 +19,7 @@ analyze-library = "library_analyzer.main:main" python = "^3.10,<3.12" astroid = "^2.14.2" black = "^23.1.0" -levenshtein = "^0.20.9" +levenshtein = ">=0.20.9,<0.22.0" scipy = "^1.10.1" spacy = "^3.5.1" docstring-parser = "^0.15" @@ -31,11 +31,11 @@ pytest-cov = "^4.0.0" [tool.poetry.group.docs.dependencies] jupyter = "^1.0.0" mkdocs = "^1.4.2" -mkdocstrings = "^0.20.0" -mkdocstrings-python = "^0.8.3" +mkdocstrings = ">=0.20,<0.23" +mkdocstrings-python = ">=0.8.3,<1.2.0" mkdocs-autorefs = "^0.4.1" mkdocs-exclude = "^1.0.2" -mkdocs-gen-files = "^0.4.0" +mkdocs-gen-files = ">=0.4,<0.6" mkdocs-glightbox = "^0.3.1" mkdocs-jupyter = "^0.24.0" mkdocs-literate-nav = "^0.6.0" diff --git a/src/library_analyzer/cli/_json_encoder.py b/src/library_analyzer/cli/_json_encoder.py deleted file mode 100644 index 2e2fc628..00000000 --- a/src/library_analyzer/cli/_json_encoder.py +++ /dev/null @@ -1,9 +0,0 @@ -from json import JSONEncoder -from typing import Any - - -class CustomEncoder(JSONEncoder): - def default(self, o: Any) -> Any: - if isinstance(o, set): - return list(o) - return super().default(o) diff --git a/src/library_analyzer/cli/_read_and_write_file.py b/src/library_analyzer/cli/_read_and_write_file.py deleted file mode 100644 index 38e48ee0..00000000 --- a/src/library_analyzer/cli/_read_and_write_file.py +++ /dev/null @@ -1,51 +0,0 @@ -import json -from pathlib import Path - -from library_analyzer.cli._json_encoder import CustomEncoder -from library_analyzer.processing.annotations.model import AnnotationStore -from library_analyzer.processing.api.model import API -from library_analyzer.processing.dependencies._parameter_dependencies import APIDependencies -from library_analyzer.processing.usages.model import UsageCountStore -from library_analyzer.utils import ensure_file_exists - - -def _read_annotations_file(annotations_file_path: Path) -> AnnotationStore: - with annotations_file_path.open(encoding="utf-8") as annotations_file: - annotations_json = json.load(annotations_file) - - return AnnotationStore.from_json(annotations_json) - - -def _write_annotations_file(annotations: AnnotationStore, annotations_file_path: Path) -> None: - ensure_file_exists(annotations_file_path) - with annotations_file_path.open("w", encoding="utf-8") as f: - json.dump(annotations.to_json(), f, indent=2) - - -def _read_api_file(api_file_path: Path) -> API: - with api_file_path.open(encoding="utf-8") as api_file: - api_json = json.load(api_file) - - return API.from_json(api_json) - - -def _read_usages_file(usages_file_path: Path) -> UsageCountStore: - with usages_file_path.open(encoding="utf-8") as usages_file: - usages_json = json.load(usages_file) - - return UsageCountStore.from_json(usages_json) - - -def _write_api_file(api: API, out_dir_path: Path) -> Path: - out_file_api = out_dir_path.joinpath(f"{api.package}__api.json") - ensure_file_exists(out_file_api) - with out_file_api.open("w", encoding="utf-8") as f: - json.dump(api.to_json(), f, indent=2, cls=CustomEncoder) - return out_file_api - - -def 
_write_api_dependency_file(api: API, api_dependencies: APIDependencies, out: Path) -> None:
-    out_file_api_dependencies = out.joinpath(f"{api.package}__api_dependencies.json")
-    ensure_file_exists(out_file_api_dependencies)
-    with out_file_api_dependencies.open("w") as f:
-        json.dump(api_dependencies.to_json(), f, indent=2, cls=CustomEncoder)
diff --git a/src/library_analyzer/cli/_run_annotations.py b/src/library_analyzer/cli/_run_annotations.py
index 9a6731d8..dc642b84 100644
--- a/src/library_analyzer/cli/_run_annotations.py
+++ b/src/library_analyzer/cli/_run_annotations.py
@@ -1,12 +1,8 @@
 from pathlib import Path
 
 from library_analyzer.processing.annotations import generate_annotations
-
-from ._read_and_write_file import (
-    _read_api_file,
-    _read_usages_file,
-    _write_annotations_file,
-)
+from library_analyzer.processing.api.model import API
+from library_analyzer.processing.usages.model import UsageCountStore
 
 
 def _run_annotations(api_file_path: Path, usages_file_path: Path, annotations_file_path: Path) -> None:
@@ -15,11 +11,16 @@ def _run_annotations(api_file_path: Path, usages_file_path: Path, annotations_fi
 
     Annotations that are generated are: remove, constant, required, optional, enum and boundary.
 
-    :param api_file_path: API file Path
-    :param usages_file_path: UsageStore file Path
-    :param annotations_file_path: Output file Path.
+    Parameters
+    ----------
+    api_file_path : Path
+        API file Path
+    usages_file_path : Path
+        UsageStore file Path
+    annotations_file_path : Path
+        Output file Path.
     """
-    api = _read_api_file(api_file_path)
-    usages = _read_usages_file(usages_file_path)
+    api = API.from_json_file(api_file_path)
+    usages = UsageCountStore.from_json_file(usages_file_path)
     annotations = generate_annotations(api, usages)
-    _write_annotations_file(annotations, annotations_file_path)
+    annotations.to_json_file(annotations_file_path)
diff --git a/src/library_analyzer/cli/_run_api.py b/src/library_analyzer/cli/_run_api.py
index 8d81a850..c0c480d5 100644
--- a/src/library_analyzer/cli/_run_api.py
+++ b/src/library_analyzer/cli/_run_api.py
@@ -4,22 +4,31 @@
 from library_analyzer.processing.api.docstring_parsing import DocstringStyle
 from library_analyzer.processing.dependencies import get_dependencies
 
-from ._read_and_write_file import _write_api_dependency_file, _write_api_file
-from ._shared_constants import _API_KEY
-
 
 def _run_api_command(
     package: str,
     src_dir_path: Path,
     out_dir_path: Path,
     docstring_style: DocstringStyle,
-    result_dict: dict | None = None,
 ) -> None:
-    api = get_api(package, src_dir_path, docstring_style)
-    api_dependencies = get_dependencies(api)
+    """
+    List the API of a package.
 
-    api_file_path = _write_api_file(api, out_dir_path)
-    _write_api_dependency_file(api, api_dependencies, out_dir_path)
+    Parameters
+    ----------
+    package : str
+        The name of the package.
+    src_dir_path : Path
+        The path to the source directory of the package.
+    out_dir_path : Path
+        The path to the output directory.
+    docstring_style : DocstringStyle
+        The style of docstrings that are used in the library.
+ """ + api = get_api(package, src_dir_path, docstring_style) + out_file_api = out_dir_path.joinpath(f"{package}__api.json") + api.to_json_file(out_file_api) - if result_dict is not None: - result_dict[_API_KEY] = api_file_path + api_dependencies = get_dependencies(api) + out_file_api_dependencies = out_dir_path.joinpath(f"{package}__api_dependencies.json") + api_dependencies.to_json_file(out_file_api_dependencies) diff --git a/src/library_analyzer/cli/_run_migrate.py b/src/library_analyzer/cli/_run_migrate.py index 06c039b5..69dc4b0b 100644 --- a/src/library_analyzer/cli/_run_migrate.py +++ b/src/library_analyzer/cli/_run_migrate.py @@ -1,6 +1,8 @@ from pathlib import Path from typing import Any +from library_analyzer.processing.annotations.model import AnnotationStore +from library_analyzer.processing.api.model import API from library_analyzer.processing.migration import Migration from library_analyzer.processing.migration.model import ( AbstractDiffer, @@ -12,12 +14,6 @@ UnchangedDiffer, ) -from ._read_and_write_file import ( - _read_annotations_file, - _read_api_file, - _write_annotations_file, -) - def _run_migrate_command( apiv1_file_path: Path, @@ -25,12 +21,21 @@ def _run_migrate_command( apiv2_file_path: Path, out_dir_path: Path, ) -> None: - apiv1 = _read_api_file(apiv1_file_path) - apiv2 = _read_api_file(apiv2_file_path) - annotationsv1 = _read_annotations_file(annotations_file_path) + apiv1 = API.from_json_file(apiv1_file_path) + apiv2 = API.from_json_file(apiv2_file_path) + annotationsv1 = AnnotationStore.from_json_file(annotations_file_path) + + threshold_of_similarity_for_creation_of_mappings = 0.61 + threshold_of_similarity_between_mappings = 0.23 unchanged_differ = UnchangedDiffer(None, [], apiv1, apiv2) - api_mapping = APIMapping(apiv1, apiv2, unchanged_differ) + api_mapping = APIMapping( + apiv1, + apiv2, + unchanged_differ, + threshold_of_similarity_for_creation_of_mappings, + threshold_of_similarity_between_mappings, + ) unchanged_mappings: list[Mapping] = api_mapping.map_api() previous_mappings = unchanged_mappings previous_base_differ: AbstractDiffer | None = unchanged_differ @@ -44,7 +49,13 @@ def _run_migrate_command( for differ_init in differ_init_list: differ_class, additional_parameters = differ_init differ = differ_class(previous_base_differ, previous_mappings, apiv1, apiv2, **additional_parameters) - api_mapping = APIMapping(apiv1, apiv2, differ) + api_mapping = APIMapping( + apiv1, + apiv2, + differ, + threshold_of_similarity_for_creation_of_mappings, + threshold_of_similarity_between_mappings, + ) mappings = api_mapping.map_api() previous_mappings = mappings @@ -56,5 +67,5 @@ def _run_migrate_command( migration.print(apiv1, apiv2) migrated_annotations_file = out_dir_path / f"migrated_annotationsv{apiv2.version}.json" unsure_migrated_annotations_file = out_dir_path / f"unsure_migrated_annotationsv{apiv2.version}.json" - _write_annotations_file(migration.migrated_annotation_store, migrated_annotations_file) - _write_annotations_file(migration.unsure_migrated_annotation_store, unsure_migrated_annotations_file) + migration.migrated_annotation_store.to_json_file(migrated_annotations_file) + migration.unsure_migrated_annotation_store.to_json_file(unsure_migrated_annotations_file) diff --git a/src/library_analyzer/cli/_run_usages.py b/src/library_analyzer/cli/_run_usages.py index 5a0eeecc..05cd7b93 100644 --- a/src/library_analyzer/cli/_run_usages.py +++ b/src/library_analyzer/cli/_run_usages.py @@ -1,9 +1,6 @@ -import json from pathlib import Path -from 
library_analyzer.cli._shared_constants import _USAGES_KEY
 from library_analyzer.processing.usages import find_usages
-from library_analyzer.utils import ensure_file_exists
 
 
 def _run_usages_command(
@@ -12,14 +9,23 @@
     out_dir_path: Path,
     n_processes: int,
     batch_size: int,
-    result_dict: dict | None = None,
 ) -> None:
-    usages = find_usages(package, client_dir_path, n_processes, batch_size)
+    """
+    Find usages of API elements.
+
+    Parameters
+    ----------
+    package : str
+        The name of the package.
+    client_dir_path : Path
+        The path to the directory with the client code.
+    out_dir_path : Path
+        The path to the output directory.
+    n_processes : int
+        The number of processes to use.
+    batch_size : int
+        The batch size to use.
+    """
+    usages = find_usages(package, client_dir_path, n_processes, batch_size)
 
     out_file_usage_count = out_dir_path.joinpath(f"{package}__usage_counts.json")
-    ensure_file_exists(out_file_usage_count)
-    with out_file_usage_count.open("w") as f:
-        json.dump(usages.to_json(), f, indent=2)
-
-    if result_dict is not None:
-        result_dict[_USAGES_KEY] = out_file_usage_count
+    usages.to_json_file(out_file_usage_count)
diff --git a/src/library_analyzer/cli/_shared_constants.py b/src/library_analyzer/cli/_shared_constants.py
deleted file mode 100644
index e5facd9f..00000000
--- a/src/library_analyzer/cli/_shared_constants.py
+++ /dev/null
@@ -1,2 +0,0 @@
-_API_KEY = "api"
-_USAGES_KEY = "usages"
diff --git a/src/library_analyzer/processing/annotations/model/_annotation_store.py b/src/library_analyzer/processing/annotations/model/_annotation_store.py
index dd599bbc..e9cb6877 100644
--- a/src/library_analyzer/processing/annotations/model/_annotation_store.py
+++ b/src/library_analyzer/processing/annotations/model/_annotation_store.py
@@ -1,7 +1,10 @@
 from __future__ import annotations
 
+import json
 from dataclasses import dataclass, field
-from typing import Any
+from typing import TYPE_CHECKING, Any
+
+from library_analyzer.utils import ensure_file_exists
 
 from ._annotations import (
     ANNOTATION_SCHEMA_VERSION,
@@ -21,6 +24,9 @@
     ValueAnnotation,
 )
 
+if TYPE_CHECKING:
+    from pathlib import Path
+
 
 @dataclass
 class AnnotationStore:
@@ -39,61 +45,68 @@ class AnnotationStore:
     valueAnnotations: list[ValueAnnotation] = field(default_factory=list)  # noqa: N815
 
     @staticmethod
-    def from_json(json: Any) -> AnnotationStore:
-        if json["schemaVersion"] == 1:
+    def from_json_file(path: Path) -> AnnotationStore:
+        with path.open(encoding="utf-8") as annotations_file:
+            annotations_json = json.load(annotations_file)
+
+        return AnnotationStore.from_dict(annotations_json)
+
+    @staticmethod
+    def from_dict(d: dict[str, Any]) -> AnnotationStore:
+        if d["schemaVersion"] == 1:
             raise ValueError("Incompatible Annotation File: This file is not compatible with the current version.")
 
         boundary_annotations = []
-        for annotation in json["boundaryAnnotations"].values():
-            boundary_annotations.append(BoundaryAnnotation.from_json(annotation))
+        for annotation in d["boundaryAnnotations"].values():
+            boundary_annotations.append(BoundaryAnnotation.from_dict(annotation))
 
         called_after_annotations = []
-        for annotation in json["calledAfterAnnotations"].values():
-            called_after_annotations.append(CalledAfterAnnotation.from_json(annotation))
+        for annotation in d["calledAfterAnnotations"].values():
-            called_after_annotations.append(CalledAfterAnnotation.from_dict(annotation))
+        for annotation in d["calledAfterAnnotations"].values():
+            called_after_annotations.append(CalledAfterAnnotation.from_dict(annotation))
 
         complete_annotations = []
-        for annotation in json["completeAnnotations"].values():
-            
complete_annotations.append(CompleteAnnotation.from_json(annotation)) + for annotation in d["completeAnnotations"].values(): + complete_annotations.append(CompleteAnnotation.from_dict(annotation)) description_annotations = [] - for annotation in json["descriptionAnnotations"].values(): - description_annotations.append(DescriptionAnnotation.from_json(annotation)) + for annotation in d["descriptionAnnotations"].values(): + description_annotations.append(DescriptionAnnotation.from_dict(annotation)) enum_annotations = [] - for annotation in json["enumAnnotations"].values(): - enum_annotations.append(EnumAnnotation.from_json(annotation)) + for annotation in d["enumAnnotations"].values(): + enum_annotations.append(EnumAnnotation.from_dict(annotation)) expert_annotations = [] - for annotation in json["expertAnnotations"].values(): - expert_annotations.append(ExpertAnnotation.from_json(annotation)) + for annotation in d["expertAnnotations"].values(): + expert_annotations.append(ExpertAnnotation.from_dict(annotation)) group_annotations = [] - for annotation in json["groupAnnotations"].values(): - group_annotations.append(GroupAnnotation.from_json(annotation)) + for annotation in d["groupAnnotations"].values(): + group_annotations.append(GroupAnnotation.from_dict(annotation)) move_annotations = [] - for annotation in json["moveAnnotations"].values(): - move_annotations.append(MoveAnnotation.from_json(annotation)) + for annotation in d["moveAnnotations"].values(): + move_annotations.append(MoveAnnotation.from_dict(annotation)) pure_annotations = [] - for annotation in json["pureAnnotations"].values(): - pure_annotations.append(PureAnnotation.from_json(annotation)) + for annotation in d["pureAnnotations"].values(): + pure_annotations.append(PureAnnotation.from_dict(annotation)) remove_annotations = [] - for annotation in json["removeAnnotations"].values(): - remove_annotations.append(RemoveAnnotation.from_json(annotation)) + for annotation in d["removeAnnotations"].values(): + remove_annotations.append(RemoveAnnotation.from_dict(annotation)) rename_annotations = [] - for annotation in json["renameAnnotations"].values(): - rename_annotations.append(RenameAnnotation.from_json(annotation)) + for annotation in d["renameAnnotations"].values(): + rename_annotations.append(RenameAnnotation.from_dict(annotation)) todo_annotations = [] - for annotation in json["todoAnnotations"].values(): - todo_annotations.append(TodoAnnotation.from_json(annotation)) + for annotation in d["todoAnnotations"].values(): + todo_annotations.append(TodoAnnotation.from_dict(annotation)) value_annotations = [] - for annotation in json["valueAnnotations"].values(): - value_annotations.append(ValueAnnotation.from_json(annotation)) + for annotation in d["valueAnnotations"].values(): + value_annotations.append(ValueAnnotation.from_dict(annotation)) return AnnotationStore( boundary_annotations, @@ -139,24 +152,29 @@ def add_annotation(self, annotation: AbstractAnnotation) -> None: elif isinstance(annotation, ValueAnnotation): self.valueAnnotations.append(annotation) - def to_json(self) -> dict: + def to_json_file(self, path: Path) -> None: + ensure_file_exists(path) + with path.open("w", encoding="utf-8") as f: + json.dump(self.to_dict(), f, indent=2) + + def to_dict(self) -> dict: return { "schemaVersion": ANNOTATION_SCHEMA_VERSION, - "boundaryAnnotations": {annotation.target: annotation.to_json() for annotation in self.boundaryAnnotations}, + "boundaryAnnotations": {annotation.target: annotation.to_dict() for annotation in 
self.boundaryAnnotations}, "calledAfterAnnotations": { - annotation.target: annotation.to_json() for annotation in self.calledAfterAnnotations + annotation.target: annotation.to_dict() for annotation in self.calledAfterAnnotations }, - "completeAnnotations": {annotation.target: annotation.to_json() for annotation in self.completeAnnotations}, + "completeAnnotations": {annotation.target: annotation.to_dict() for annotation in self.completeAnnotations}, "descriptionAnnotations": { - annotation.target: annotation.to_json() for annotation in self.descriptionAnnotations + annotation.target: annotation.to_dict() for annotation in self.descriptionAnnotations }, - "enumAnnotations": {annotation.target: annotation.to_json() for annotation in self.enumAnnotations}, - "expertAnnotations": {annotation.target: annotation.to_json() for annotation in self.expertAnnotations}, - "groupAnnotations": {annotation.target: annotation.to_json() for annotation in self.groupAnnotations}, - "moveAnnotations": {annotation.target: annotation.to_json() for annotation in self.moveAnnotations}, - "pureAnnotations": {annotation.target: annotation.to_json() for annotation in self.pureAnnotations}, - "renameAnnotations": {annotation.target: annotation.to_json() for annotation in self.renameAnnotations}, - "removeAnnotations": {annotation.target: annotation.to_json() for annotation in self.removeAnnotations}, - "todoAnnotations": {annotation.target: annotation.to_json() for annotation in self.todoAnnotations}, - "valueAnnotations": {annotation.target: annotation.to_json() for annotation in self.valueAnnotations}, + "enumAnnotations": {annotation.target: annotation.to_dict() for annotation in self.enumAnnotations}, + "expertAnnotations": {annotation.target: annotation.to_dict() for annotation in self.expertAnnotations}, + "groupAnnotations": {annotation.target: annotation.to_dict() for annotation in self.groupAnnotations}, + "moveAnnotations": {annotation.target: annotation.to_dict() for annotation in self.moveAnnotations}, + "pureAnnotations": {annotation.target: annotation.to_dict() for annotation in self.pureAnnotations}, + "renameAnnotations": {annotation.target: annotation.to_dict() for annotation in self.renameAnnotations}, + "removeAnnotations": {annotation.target: annotation.to_dict() for annotation in self.removeAnnotations}, + "todoAnnotations": {annotation.target: annotation.to_dict() for annotation in self.todoAnnotations}, + "valueAnnotations": {annotation.target: annotation.to_dict() for annotation in self.valueAnnotations}, } diff --git a/src/library_analyzer/processing/annotations/model/_annotations.py b/src/library_analyzer/processing/annotations/model/_annotations.py index d4ed6a1c..a0a0e791 100644 --- a/src/library_analyzer/processing/annotations/model/_annotations.py +++ b/src/library_analyzer/processing/annotations/model/_annotations.py @@ -15,7 +15,7 @@ class EnumReviewResult(Enum): NONE = "" @staticmethod - def to_json(result: list[tuple[str, Any]]) -> dict[str, Any]: + def to_dict(result: list[tuple[str, Any]]) -> dict[str, Any]: for item in result: if isinstance(item[1], EnumReviewResult): result.append((item[0], item[1].value)) @@ -31,27 +31,27 @@ class AbstractAnnotation(ABC): comment: str reviewResult: EnumReviewResult # noqa: N815 - def to_json(self) -> dict: - return asdict(self, dict_factory=EnumReviewResult.to_json) - @staticmethod - def from_json(json: Any) -> AbstractAnnotation: - review_result = EnumReviewResult(json.get("reviewResult", "")) + def from_dict(d: dict[str, Any]) -> 
AbstractAnnotation: + review_result = EnumReviewResult(d.get("reviewResult", "")) return AbstractAnnotation( - json["target"], - json["authors"], - json["reviewers"], - json.get("comment", ""), + d["target"], + d["authors"], + d["reviewers"], + d.get("comment", ""), review_result, ) + def to_dict(self) -> dict[str, Any]: + return asdict(self, dict_factory=EnumReviewResult.to_dict) + @dataclass class RemoveAnnotation(AbstractAnnotation): @staticmethod - def from_json(json: Any) -> RemoveAnnotation: - annotation = AbstractAnnotation.from_json(json) + def from_dict(d: dict[str, Any]) -> RemoveAnnotation: + annotation = AbstractAnnotation.from_dict(d) return RemoveAnnotation( annotation.target, annotation.authors, @@ -69,58 +69,58 @@ class Interval: upperLimitType: int # noqa: N815 isDiscrete: bool # noqa: N815 - def to_json(self) -> dict: - return asdict(self) - @staticmethod - def from_json(json: Any) -> Interval: + def from_dict(d: dict[str, Any]) -> Interval: return Interval( - json["lowerIntervalLimit"], - json["lowerLimitType"], - json["upperIntervalLimit"], - json["upperLimitType"], - json["isDiscrete"], + d["lowerIntervalLimit"], + d["lowerLimitType"], + d["upperIntervalLimit"], + d["upperLimitType"], + d["isDiscrete"], ) + def to_dict(self) -> dict: + return asdict(self) + @dataclass class BoundaryAnnotation(AbstractAnnotation): interval: Interval - def to_json(self) -> dict: - return { - "target": self.target, - "authors": self.authors, - "reviewers": self.reviewers, - "comment": self.comment, - "reviewResult": self.reviewResult.value, - "interval": self.interval.to_json(), - } - @staticmethod - def from_json(json: Any) -> BoundaryAnnotation: - annotation = AbstractAnnotation.from_json(json) + def from_dict(d: dict[str, Any]) -> BoundaryAnnotation: + annotation = AbstractAnnotation.from_dict(d) return BoundaryAnnotation( annotation.target, annotation.authors, annotation.reviewers, annotation.comment, annotation.reviewResult, - Interval.from_json(json["interval"]), + Interval.from_dict(d["interval"]), ) + def to_dict(self) -> dict[str, Any]: + return { + "target": self.target, + "authors": self.authors, + "reviewers": self.reviewers, + "comment": self.comment, + "reviewResult": self.reviewResult.value, + "interval": self.interval.to_dict(), + } + @dataclass class EnumPair: stringValue: str # noqa: N815 instanceName: str # noqa: N815 - def to_json(self) -> dict: - return asdict(self) - @staticmethod - def from_json(json: Any) -> EnumPair: - return EnumPair(json["stringValue"], json["instanceName"]) + def from_dict(d: dict[str, Any]) -> EnumPair: + return EnumPair(d["stringValue"], d["instanceName"]) + + def to_dict(self) -> dict[str, Any]: + return asdict(self) @dataclass @@ -128,31 +128,31 @@ class EnumAnnotation(AbstractAnnotation): enumName: str # noqa: N815 pairs: list[EnumPair] - def to_json(self) -> dict: - return { - "target": self.target, - "authors": self.authors, - "reviewers": self.reviewers, - "comment": self.comment, - "reviewResult": self.reviewResult.value, - "enumName": self.enumName, - "pairs": [pair.to_json() for pair in self.pairs], - } - @staticmethod - def from_json(json: Any) -> EnumAnnotation: - annotation = AbstractAnnotation.from_json(json) - pairs = [EnumPair.from_json(enum_pair) for enum_pair in json["pairs"]] + def from_dict(d: dict[str, Any]) -> EnumAnnotation: + annotation = AbstractAnnotation.from_dict(d) + pairs = [EnumPair.from_dict(enum_pair) for enum_pair in d["pairs"]] return EnumAnnotation( annotation.target, annotation.authors, 
annotation.reviewers, annotation.comment, annotation.reviewResult, - json["enumName"], + d["enumName"], pairs, ) + def to_dict(self) -> dict[str, Any]: + return { + "target": self.target, + "authors": self.authors, + "reviewers": self.reviewers, + "comment": self.comment, + "reviewResult": self.reviewResult.value, + "enumName": self.enumName, + "pairs": [pair.to_dict() for pair in self.pairs], + } + class ValueAnnotation(AbstractAnnotation, ABC): class Variant(Enum): @@ -170,16 +170,16 @@ class DefaultValueType(Enum): variant: Variant @staticmethod - def from_json(json: Any) -> ValueAnnotation: - variant = json["variant"] + def from_dict(d: dict[str, Any]) -> ValueAnnotation: + variant = d["variant"] if ValueAnnotation.Variant.CONSTANT.value == variant: - return ConstantAnnotation.from_json(json) + return ConstantAnnotation.from_dict(d) if ValueAnnotation.Variant.OMITTED.value == variant: - return OmittedAnnotation.from_json(json) + return OmittedAnnotation.from_dict(d) if ValueAnnotation.Variant.OPTIONAL.value == variant: - return OptionalAnnotation.from_json(json) + return OptionalAnnotation.from_dict(d) if ValueAnnotation.Variant.REQUIRED.value == variant: - return RequiredAnnotation.from_json(json) + return RequiredAnnotation.from_dict(d) raise KeyError("unkonwn variant found") @@ -189,7 +189,20 @@ class ConstantAnnotation(ValueAnnotation): defaultValueType: ValueAnnotation.DefaultValueType # noqa: N815 defaultValue: Any # noqa: N815 - def to_json(self) -> dict: + @staticmethod + def from_dict(d: dict[str, Any]) -> ConstantAnnotation: + annotation = AbstractAnnotation.from_dict(d) + return ConstantAnnotation( + annotation.target, + annotation.authors, + annotation.reviewers, + annotation.comment, + annotation.reviewResult, + ValueAnnotation.DefaultValueType(d["defaultValueType"]), + d["defaultValue"], + ) + + def to_dict(self) -> dict[str, Any]: return { "target": self.target, "authors": self.authors, @@ -201,25 +214,23 @@ def to_json(self) -> dict: "defaultValue": self.defaultValue, } + +@dataclass +class OmittedAnnotation(ValueAnnotation): + variant = ValueAnnotation.Variant.OMITTED + @staticmethod - def from_json(json: Any) -> ConstantAnnotation: - annotation = AbstractAnnotation.from_json(json) - return ConstantAnnotation( + def from_dict(d: dict[str, Any]) -> OmittedAnnotation: + annotation = AbstractAnnotation.from_dict(d) + return OmittedAnnotation( annotation.target, annotation.authors, annotation.reviewers, annotation.comment, annotation.reviewResult, - ValueAnnotation.DefaultValueType(json["defaultValueType"]), - json["defaultValue"], ) - -@dataclass -class OmittedAnnotation(ValueAnnotation): - variant = ValueAnnotation.Variant.OMITTED - - def to_json(self) -> dict: + def to_dict(self) -> dict[str, Any]: return { "target": self.target, "authors": self.authors, @@ -229,25 +240,27 @@ def to_json(self) -> dict: "variant": self.variant.value, } + +@dataclass +class OptionalAnnotation(ValueAnnotation): + variant = ValueAnnotation.Variant.OPTIONAL + defaultValueType: ValueAnnotation.DefaultValueType # noqa: N815 + defaultValue: Any # noqa: N815 + @staticmethod - def from_json(json: Any) -> OmittedAnnotation: - annotation = AbstractAnnotation.from_json(json) - return OmittedAnnotation( + def from_dict(d: dict[str, Any]) -> OptionalAnnotation: + annotation = AbstractAnnotation.from_dict(d) + return OptionalAnnotation( annotation.target, annotation.authors, annotation.reviewers, annotation.comment, annotation.reviewResult, + ValueAnnotation.DefaultValueType(d["defaultValueType"]), + 
d["defaultValue"], ) - -@dataclass -class OptionalAnnotation(ValueAnnotation): - variant = ValueAnnotation.Variant.OPTIONAL - defaultValueType: ValueAnnotation.DefaultValueType # noqa: N815 - defaultValue: Any # noqa: N815 - - def to_json(self) -> dict: + def to_dict(self) -> dict[str, Any]: return { "target": self.target, "authors": self.authors, @@ -259,25 +272,23 @@ def to_json(self) -> dict: "defaultValue": self.defaultValue, } + +@dataclass +class RequiredAnnotation(ValueAnnotation): + variant = ValueAnnotation.Variant.REQUIRED + @staticmethod - def from_json(json: Any) -> OptionalAnnotation: - annotation = AbstractAnnotation.from_json(json) - return OptionalAnnotation( + def from_dict(d: dict[str, Any]) -> RequiredAnnotation: + annotation = AbstractAnnotation.from_dict(d) + return RequiredAnnotation( annotation.target, annotation.authors, annotation.reviewers, annotation.comment, annotation.reviewResult, - ValueAnnotation.DefaultValueType(json["defaultValueType"]), - json["defaultValue"], ) - -@dataclass -class RequiredAnnotation(ValueAnnotation): - variant = ValueAnnotation.Variant.REQUIRED - - def to_json(self) -> dict: + def to_dict(self) -> dict[str, Any]: return { "target": self.target, "authors": self.authors, @@ -287,17 +298,6 @@ def to_json(self) -> dict: "variant": self.variant.value, } - @staticmethod - def from_json(json: Any) -> RequiredAnnotation: - annotation = AbstractAnnotation.from_json(json) - return RequiredAnnotation( - annotation.target, - annotation.authors, - annotation.reviewers, - annotation.comment, - annotation.reviewResult, - ) - class ParameterType(Enum): Constant = 0 @@ -322,22 +322,22 @@ class CalledAfterAnnotation(AbstractAnnotation): calledAfterName: str # noqa: N815 @staticmethod - def from_json(json: Any) -> CalledAfterAnnotation: - annotation = AbstractAnnotation.from_json(json) + def from_dict(d: Any) -> CalledAfterAnnotation: + annotation = AbstractAnnotation.from_dict(d) return CalledAfterAnnotation( annotation.target, annotation.authors, annotation.reviewers, annotation.comment, annotation.reviewResult, - json["calledAfterName"], + d["calledAfterName"], ) class CompleteAnnotation(AbstractAnnotation): @staticmethod - def from_json(json: Any) -> CompleteAnnotation: - annotation = AbstractAnnotation.from_json(json) + def from_dict(d: Any) -> CompleteAnnotation: + annotation = AbstractAnnotation.from_dict(d) return CompleteAnnotation( annotation.target, annotation.authors, @@ -352,23 +352,23 @@ class DescriptionAnnotation(AbstractAnnotation): newDescription: str # noqa: N815 @staticmethod - def from_json(json: Any) -> DescriptionAnnotation: - annotation = AbstractAnnotation.from_json(json) + def from_dict(d: Any) -> DescriptionAnnotation: + annotation = AbstractAnnotation.from_dict(d) return DescriptionAnnotation( annotation.target, annotation.authors, annotation.reviewers, annotation.comment, annotation.reviewResult, - json["newDescription"], + d["newDescription"], ) @dataclass class ExpertAnnotation(AbstractAnnotation): @staticmethod - def from_json(json: Any) -> ExpertAnnotation: - annotation = AbstractAnnotation.from_json(json) + def from_dict(d: Any) -> ExpertAnnotation: + annotation = AbstractAnnotation.from_dict(d) return ExpertAnnotation( annotation.target, annotation.authors, @@ -384,16 +384,16 @@ class GroupAnnotation(AbstractAnnotation): parameters: list[str] @staticmethod - def from_json(json: Any) -> GroupAnnotation: - annotation = AbstractAnnotation.from_json(json) + def from_dict(d: Any) -> GroupAnnotation: + annotation = 
AbstractAnnotation.from_dict(d) return GroupAnnotation( annotation.target, annotation.authors, annotation.reviewers, annotation.comment, annotation.reviewResult, - json["groupName"], - json["parameters"], + d["groupName"], + d["parameters"], ) @@ -402,22 +402,22 @@ class MoveAnnotation(AbstractAnnotation): destination: str @staticmethod - def from_json(json: Any) -> MoveAnnotation: - annotation = AbstractAnnotation.from_json(json) + def from_dict(d: Any) -> MoveAnnotation: + annotation = AbstractAnnotation.from_dict(d) return MoveAnnotation( annotation.target, annotation.authors, annotation.reviewers, annotation.comment, annotation.reviewResult, - json["destination"], + d["destination"], ) class PureAnnotation(AbstractAnnotation): @staticmethod - def from_json(json: Any) -> PureAnnotation: - annotation = AbstractAnnotation.from_json(json) + def from_dict(d: Any) -> PureAnnotation: + annotation = AbstractAnnotation.from_dict(d) return PureAnnotation( annotation.target, annotation.authors, @@ -432,15 +432,15 @@ class RenameAnnotation(AbstractAnnotation): newName: str # noqa: N815 @staticmethod - def from_json(json: Any) -> RenameAnnotation: - annotation = AbstractAnnotation.from_json(json) + def from_dict(d: Any) -> RenameAnnotation: + annotation = AbstractAnnotation.from_dict(d) return RenameAnnotation( annotation.target, annotation.authors, annotation.reviewers, annotation.comment, annotation.reviewResult, - json["newName"], + d["newName"], ) @@ -449,13 +449,13 @@ class TodoAnnotation(AbstractAnnotation): newTodo: str # noqa: N815 @staticmethod - def from_json(json: Any) -> TodoAnnotation: - annotation = AbstractAnnotation.from_json(json) + def from_dict(d: Any) -> TodoAnnotation: + annotation = AbstractAnnotation.from_dict(d) return TodoAnnotation( annotation.target, annotation.authors, annotation.reviewers, annotation.comment, annotation.reviewResult, - json["newTodo"], + d["newTodo"], ) diff --git a/src/library_analyzer/processing/api/__init__.py b/src/library_analyzer/processing/api/__init__.py index 3ab51caf..b5b370d6 100644 --- a/src/library_analyzer/processing/api/__init__.py +++ b/src/library_analyzer/processing/api/__init__.py @@ -26,6 +26,7 @@ package_files, package_root, ) +from ._resolve_references import ClassScopeNode, MemberAccess, ScopeNode, get_scope __all__ = [ "DefinitelyImpure", @@ -50,4 +51,8 @@ "infer_purity", "package_files", "package_root", + "ScopeNode", + "MemberAccess", + "get_scope", + "ClassScopeNode", ] diff --git a/src/library_analyzer/processing/api/_ast_visitor.py b/src/library_analyzer/processing/api/_ast_visitor.py index 99ec02b2..3170558d 100644 --- a/src/library_analyzer/processing/api/_ast_visitor.py +++ b/src/library_analyzer/processing/api/_ast_visitor.py @@ -131,8 +131,9 @@ def leave_module(self, _: astroid.Module) -> None: self.api.add_module(module) def enter_classdef(self, class_node: astroid.ClassDef) -> None: + id_ = self.__get_id(class_node.name) qname = class_node.qname() - instance_attributes = get_instance_attributes(class_node) + instance_attributes = get_instance_attributes(class_node, id_) decorators: astroid.Decorators | None = class_node.decorators if decorators is not None: @@ -144,13 +145,13 @@ def enter_classdef(self, class_node: astroid.ClassDef) -> None: # Remember class, so we can later add methods class_ = Class( - id=self.__get_id(class_node.name), + id=id_, qname=qname, decorators=decorator_names, superclasses=class_node.basenames, is_public=self.is_public(class_node.name, qname), reexported_by=self.reexported.get(qname, []), 
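+            # The class docstring, parsed in the configured docstring style.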
- documentation=self.docstring_parser.get_class_documentation(class_node), + docstring=self.docstring_parser.get_class_documentation(class_node), code=code, instance_attributes=instance_attributes, ) @@ -197,7 +198,7 @@ def enter_functiondef(self, function_node: astroid.FunctionDef) -> None: results=[], # TODO: results is_public=is_public, reexported_by=self.reexported.get(qname, []), - documentation=self.docstring_parser.get_function_documentation(function_node), + docstring=self.docstring_parser.get_function_documentation(function_node), code=code, ) self.__declaration_stack.append(function) diff --git a/src/library_analyzer/processing/api/_extract_boundary_values.py b/src/library_analyzer/processing/api/_extract_boundary_values.py new file mode 100644 index 00000000..fbb9141e --- /dev/null +++ b/src/library_analyzer/processing/api/_extract_boundary_values.py @@ -0,0 +1,752 @@ +from dataclasses import dataclass, field +from typing import Any, TypeAlias + +from numpy import inf +from spacy.matcher import Matcher +from spacy.tokens import Doc, Span + +from library_analyzer.utils import load_language + +from .model import BoundaryType + +_Numeric: TypeAlias = int | float + + +@dataclass +class BoundaryList: + _boundaries: set[BoundaryType] = field(default_factory=set[BoundaryType]) + + def add_boundary(self, match_label: str, type_: str, match_string: Span = None) -> None: + """Add a boundary according to the matched rule. + + Parameters + ---------- + match_label + Label of the matched rule + type_ + Base type of the boundary to be created + match_string + Span containing the string matched by the corresponding rule. + This parameter is not required for every rule. + + """ + match match_label: + case "BOUNDARY_NON_POSITIVE": + self._boundaries.add(_create_non_positive_boundary(type_)) + case "BOUNDARY_POSITIVE": + self._boundaries.add(_create_positive_boundary(type_)) + case "BOUNDARY_NON_NEGATIVE": + self._boundaries.add(_create_non_negative_boundary(type_)) + case "BOUNDARY_NEGATIVE": + self._boundaries.add(_create_negative_boundary(type_)) + case "BOUNDARY_BETWEEN": + self._boundaries.add(_create_between_boundary(match_string, type_)) + case "BOUNDARY_INTERVAL": + self._boundaries.add(_create_interval_boundary(match_string, type_)) + case "BOUNDARY_AT_LEAST": + self._boundaries.add(_create_at_least_boundary(match_string, type_)) + case "BOUNDARY_INTERVAL_RELATIONAL": + self._boundaries.add(_create_interval_relational_boundary(match_string, type_)) + case "BOUNDARY_TYPE_REL_VAL": + self._boundaries.add(_create_type_rel_val_boundary(match_string, type_)) + case "BOUNDARY_INTERVAL_IN_BRACKETS": + self._boundaries.add(_create_interval_in_brackets_boundary(match_string, type_)) + + def get_boundaries(self) -> set[BoundaryType]: + return self._boundaries + + +type_funcs = {"float": float, "int": int} + +_nlp = load_language("en_core_web_sm") +_matcher = Matcher(_nlp.vocab) + +_geq_leq_op = [{"ORTH": {"IN": ["<", ">"]}}, {"ORTH": "="}] + +_boundary_at_least = [{"LOWER": "at"}, {"LOWER": "least"}, {"LIKE_NUM": True}] + +_boundary_min = [{"LOWER": "min"}, {"ORTH": "."}, {"LIKE_NUM": True}] + +_boundary_interval = [ + {"LOWER": {"IN": ["in", "within"]}}, + {"LOWER": "the", "OP": "?"}, + {"LOWER": {"IN": ["range", "interval"]}, "OP": "?"}, + {"LOWER": "of", "OP": "?"}, + {"ORTH": {"IN": ["(", "["]}}, + {}, + {"ORTH": ","}, + {}, + {"ORTH": {"IN": [")", "]"]}}, +] + + +_boundary_value_in = [ + {"LOWER": {"FUZZY": "value"}}, + {"LOWER": {"IN": ["is", "in"]}}, + {"ORTH": {"IN": ["(", "["]}}, + {}, + 
{"ORTH": ","}, + {}, + {"ORTH": {"IN": [")", "]"]}}, +] + + +_boundary_non_negative = [ + {"LOWER": {"IN": ["non", "not"]}}, + {"ORTH": {"IN": ["-", "_"]}, "OP": "?"}, + {"LOWER": "negative"}, +] + +_boundary_positive = [{"LOWER": "strictly", "OP": "?"}, {"LOWER": "positive"}] + +_boundary_non_positive = [ + {"LOWER": {"IN": ["non", "not"]}}, + {"ORTH": {"IN": ["-", "_"]}, "OP": "?"}, + {"LOWER": "positive"}, +] + +_boundary_negative = [{"LOWER": "strictly", "OP": "?"}, {"LOWER": "negative"}] + +_boundary_between = [{"LOWER": "between"}, {"LIKE_NUM": True}, {"LOWER": "and"}, {"LIKE_NUM": True}] + + +_boundary_gtlt_gtlt = [ + {"LIKE_NUM": True}, + {"ORTH": {"IN": ["<", ">"]}}, + {}, + {"ORTH": {"IN": ["<", ">"]}}, + {"LIKE_NUM": True}, +] + + +_boundary_geqleq_geqleq = [{"LIKE_NUM": True}, *_geq_leq_op, {}, *_geq_leq_op, {"LIKE_NUM": True}] + +_boundary_gtlt_geqleq = [{"LIKE_NUM": True}, {"ORTH": {"IN": ["<", ">"]}}, {}, *_geq_leq_op, {"LIKE_NUM": True}] + +_boundary_geqleq_gtlt = [{"LIKE_NUM": True}, *_geq_leq_op, {}, {"ORTH": {"IN": ["<", ">"]}}, {"LIKE_NUM": True}] + +_boundary_and_gtlt_gtlt = [ + {"ORTH": {"IN": ["<", ">"]}}, + {"LIKE_NUM": True}, + {"ORTH": {"IN": ["and", "or"]}}, + {"ORTH": {"IN": ["<", ">"]}}, + {"LIKE_NUM": True}, +] + +_boundary_and_geqleq_geqleq = [ + *_geq_leq_op, + {"LIKE_NUM": True}, + {"ORTH": {"IN": ["and", "or"]}}, + *_geq_leq_op, + {"LIKE_NUM": True}, +] + +_boundary_and_gtlt_geqleq = [ + {"ORTH": {"IN": ["<", ">"]}}, + {"LIKE_NUM": True}, + {"ORTH": {"IN": ["and", "or"]}}, + *_geq_leq_op, + {"LIKE_NUM": True}, +] + +_boundary_and_geqleq_gtlt = [ + *_geq_leq_op, + {"LIKE_NUM": True}, + {"ORTH": {"IN": ["and", "or"]}}, + {"ORTH": {"IN": ["<", ">"]}}, + {"LIKE_NUM": True}, +] + +_boundary_type = [{"LOWER": {"IN": ["float", "int"]}}] + +_boundary_type_gtlt_val = [*_boundary_type, {"ORTH": {"IN": ["<", ">"]}}, {"LIKE_NUM": True}] + +_boundary_type_geqleq_val = [*_boundary_type, *_geq_leq_op, {"LIKE_NUM": True}] + +_boundary_interval_in_brackets = [ + *_boundary_type, + {"ORTH": "("}, + {"ORTH": {"IN": ["(", "["]}}, + {}, + {"ORTH": ","}, + {}, + {"ORTH": {"IN": [")", "]"]}}, + {"ORTH": ")"}, +] + + +def _check_negative_pattern( + matcher: Matcher, # noqa: ARG001 + doc: Doc, # noqa: ARG001 + i: int, + matches: list[tuple[Any, ...]], +) -> Any | None: + """on-match function for the spaCy Matcher. + + Delete the BOUNDARY_NEGATIVE match if the BOUNDARY_NON_NEGATIVE rule has already been detected. + + Parameters + ---------- + matcher + Parameter is ignored. + doc + Parameter is ignored. + i + Index of the match that was recognized by the rule. + + matches + List of matches found by the matcher + + """ + previous_id, _, _ = matches[i - 1] + if _nlp.vocab.strings[previous_id] == "BOUNDARY_NON_NEGATIVE": + matches.remove(matches[i]) + + return None + + +def _check_positive_pattern( + matcher: Matcher, # noqa: ARG001 + doc: Doc, # noqa: ARG001 + i: int, + matches: list[tuple[Any, ...]], +) -> Any | None: + """on-match function for the spaCy Matcher. + + Delete the BOUNDARY_POSITIVE match if the BOUNDARY_NON_POSITIVE rule has already been detected. + + Parameters + ---------- + matcher + Parameter is ignored. + doc + Parameter is ignored. + i + Index of the match that was recognized by the rule. 
+ + matches + List of matches found by the matcher + + """ + previous_id, _, _ = matches[i - 1] + if _nlp.vocab.strings[previous_id] == "BOUNDARY_NON_POSITIVE": + matches.remove(matches[i]) + + return None + + +def _check_interval_relational_pattern( + matcher: Matcher, # noqa: ARG001 + doc: Doc, # noqa: ARG001 + i: int, + matches: list[tuple[Any, ...]], +) -> Any | None: + """on-match function for the spaCy Matcher. + + Delete the BOUNDARY_TYPE_REL_VAL match if the BOUNDARY_INTERVAL_RELATIONAL rule has been detected. + + Parameters + ---------- + matcher + Parameter is ignored. + doc + Parameter is ignored. + i + Index of the match that was recognized by the rule. + + matches + List of matches found by the matcher + + """ + previous_id, _, _ = matches[i - 1] + if _nlp.vocab.strings[previous_id] == "BOUNDARY_TYPE_REL_VAL": + matches.remove(matches[i - 1]) + + return None + + +def _check_interval( + matcher: Matcher, # noqa: ARG001 + doc: Doc, # noqa: ARG001 + i: int, + matches: list[tuple[Any, ...]], +) -> Any | None: + """on-match function for the spaCy Matcher. + + Delete the BOUNDARY_INTERVAL match if the BOUNDARY_INTERVAL rule has been already detected. + + Parameters + ---------- + matcher + Parameter is ignored. + doc + Parameter is ignored. + i + Index of the match that was recognized by the rule. + + matches + List of matches found by the matcher + + """ + previous_id, _, _ = matches[i - 1] + if _nlp.vocab.strings[previous_id] == "BOUNDARY_INTERVAL" and (len(matches) > 1): + matches.remove(matches[i - 1]) + + return None + + +relational_patterns = [ + _boundary_gtlt_gtlt, + _boundary_geqleq_geqleq, + _boundary_geqleq_gtlt, + _boundary_gtlt_geqleq, + _boundary_and_gtlt_gtlt, + _boundary_and_geqleq_geqleq, + _boundary_and_geqleq_gtlt, + _boundary_and_gtlt_geqleq, +] + +_matcher.add("BOUNDARY_AT_LEAST", [_boundary_at_least, _boundary_min]) +_matcher.add("BOUNDARY_INTERVAL", [_boundary_interval, _boundary_value_in], on_match=_check_interval) +_matcher.add("BOUNDARY_POSITIVE", [_boundary_positive], on_match=_check_positive_pattern) +_matcher.add("BOUNDARY_NON_NEGATIVE", [_boundary_non_negative]) +_matcher.add("BOUNDARY_NEGATIVE", [_boundary_negative], on_match=_check_negative_pattern) +_matcher.add("BOUNDARY_NON_POSITIVE", [_boundary_non_positive]) +_matcher.add("BOUNDARY_BETWEEN", [_boundary_between]) +_matcher.add("BOUNDARY_INTERVAL_RELATIONAL", relational_patterns, on_match=_check_interval_relational_pattern) +_matcher.add("BOUNDARY_TYPE", [_boundary_type]) +_matcher.add("BOUNDARY_TYPE_REL_VAL", [_boundary_type_gtlt_val, _boundary_type_geqleq_val]) +_matcher.add("BOUNDARY_INTERVAL_IN_BRACKETS", [_boundary_interval_in_brackets]) + + +def _get_type_value(type_: str, value: _Numeric | str) -> _Numeric: + """Transform the passed value to the value matching type_. + + Parameters + ---------- + type_ + Type to be transformed to. + value + Value to be transformed. + + Returns + ------- + Numeric + Transformed value. + """ + return type_funcs[type_](value) + + +def _create_non_positive_boundary(type_: str) -> BoundaryType: + """Create a BoundaryType with predefined extrema. + + Create a BoundaryType that describes the non-positive value range of the given type. 
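+    The resulting range is the interval (-inf, 0].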
+ + Parameters + ---------- + type_ + Base type of Boundary + + Returns + ------- + BoundaryType + + """ + return BoundaryType( + type_, + min=BoundaryType.NEGATIVE_INFINITY, + max=_get_type_value(type_, 0), + min_inclusive=False, + max_inclusive=True, + ) + + +def _create_positive_boundary(type_: str) -> BoundaryType: + """Create a BoundaryType with predefined extrema. + + Create a BoundaryType that describes the positive value range of the given type. + + Parameters + ---------- + type_ + Base type of Boundary + + Returns + ------- + BoundaryType + + """ + return BoundaryType( + type_, + min=_get_type_value(type_, 0), + max=BoundaryType.INFINITY, + min_inclusive=False, + max_inclusive=False, + ) + + +def _create_non_negative_boundary(type_: str) -> BoundaryType: + """Create a BoundaryType with predefined extrema. + + Create a BoundaryType that describes the non-negative value range of the given type. + + Parameters + ---------- + type_ + Base type of Boundary + + Returns + ------- + BoundaryType + + """ + return BoundaryType( + type_, + min=_get_type_value(type_, 0), + max=BoundaryType.INFINITY, + min_inclusive=True, + max_inclusive=False, + ) + + +def _create_negative_boundary(type_: str) -> BoundaryType: + """Create a BoundaryType with predefined extrema. + + Create a BoundaryType that describes the negative value range of the given type. + + Parameters + ---------- + type_ + Base type of Boundary + + Returns + ------- + BoundaryType + + """ + # return type_, ("negative infinity", False), (_get_type_value(type_, 0), False) + return BoundaryType( + type_, + min=BoundaryType.NEGATIVE_INFINITY, + max=_get_type_value(type_, 0), + min_inclusive=False, + max_inclusive=False, + ) + + +def _create_between_boundary(match_string: Span, type_: str) -> BoundaryType: + """Create a BoundaryType with individual extrema. + + Create a BoundaryType whose extrema are extracted from the passed match string. + + Parameters + ---------- + match_string + Match string containing the extrema of the value range. + type_ + Base type of Boundary + + Returns + ------- + BoundaryType + + """ + values = [] + for token in match_string: + if token.like_num: + values.append(_get_type_value(type_, token.text)) + return BoundaryType(type_, min=min(values), max=max(values), min_inclusive=True, max_inclusive=True) + + +def _create_at_least_boundary(match_string: Span, type_: str) -> BoundaryType: + """Create a BoundaryType with individual minimum. + + Create a BoundaryType whose minimum is extracted from the passed match string. + + Parameters + ---------- + match_string + Match string containing the minimum of the value range. + type_ + Base type of Boundary + + Returns + ------- + BoundaryType + + """ + value: _Numeric = 0 + for token in match_string: + if token.like_num: + value = _get_type_value(type_, token.text) + return BoundaryType(type_, min=value, max=BoundaryType.INFINITY, min_inclusive=True, max_inclusive=False) + + +def _create_interval_boundary(match_string: Span, type_: str) -> BoundaryType: + """Create a BoundaryType with individual extrema. + + Create a BoundaryType whose extrema are extracted from the passed match string. + + Parameters + ---------- + match_string + Match string containing the extrema of the value range. 
+    type_
+        Base type of Boundary
+
+    Returns
+    -------
+    BoundaryType
+
+    """
+    values = []
+    brackets = []
+    for token in match_string:
+        if token.text in ["(", "[", ")", "]"]:
+            brackets.append(token.text)
+        if token.like_num:
+            values.append(_get_type_value(type_, token.text))
+
+        if token.text in ["inf", "infty", "infinity"]:
+            values.append(inf)
+        elif token.text in ["negative inf", "negative infty", "negative infinity"]:
+            values.append(-inf)
+
+    type_func = type_funcs[type_]
+    if -inf in values:
+        minimum = BoundaryType.NEGATIVE_INFINITY
+        min_incl = False
+    else:
+        minimum = type_func(min(values))
+        min_incl = brackets[0] == "["
+
+    if inf in values:
+        maximum = BoundaryType.INFINITY
+        max_incl = False
+    else:
+        maximum = type_func(max(values))
+        max_incl = brackets[1] == "]"
+
+    return BoundaryType(type_, min=minimum, max=maximum, min_inclusive=min_incl, max_inclusive=max_incl)
+
+
+def _create_interval_relational_boundary(match_string: Span, type_: str) -> BoundaryType:
+    """Create a BoundaryType with individual extrema.
+
+    Create a BoundaryType whose extrema are extracted from the passed match string.
+
+    Parameters
+    ----------
+    match_string
+        Match string containing the extrema of the value range.
+    type_
+        Base type of Boundary
+
+    Returns
+    -------
+    BoundaryType
+
+    """
+    relational_ops = []
+    values = []
+    and_or_found = False
+
+    for token in match_string:
+        if token.text in ["<", ">"]:
+            relational_ops.append(token.text)
+        elif token.text == "=":
+            relational_ops[len(relational_ops) - 1] += token.text
+        elif token.like_num:
+            values.append(token.text)
+        elif token.text in ["and", "or"]:
+            and_or_found = True
+    type_func = type_funcs[type_]
+
+    minimum = type_func(min(values))
+    maximum = type_func(max(values))
+
+    if not and_or_found:
+        min_incl = (relational_ops[0] == "<=") or (relational_ops[1] == ">=")
+        max_incl = (relational_ops[1] == "<=") or (relational_ops[0] == ">=")
+    else:
+        min_incl = ">=" in relational_ops
+        max_incl = "<=" in relational_ops
+
+    return BoundaryType(type_, min=minimum, max=maximum, min_inclusive=min_incl, max_inclusive=max_incl)
+
+
+def _create_type_rel_val_boundary(match_string: Span, type_: str) -> BoundaryType:
+    """Create a BoundaryType with individual minimum or maximum.
+
+    Create a BoundaryType whose minimum or maximum is extracted from the passed match string.
+
+    Parameters
+    ----------
+    match_string
+        Match string containing the extrema of the value range.
+    type_
+        Base type of Boundary
+
+    Returns
+    -------
+    BoundaryType
+
+    """
+    val: _Numeric = 0
+    min_: _Numeric | str = 0
+    max_: _Numeric | str = 0
+
+    rel_op = ""
+    type_func = type_funcs[type_]
+    min_incl = False
+    max_incl = False
+
+    for token in match_string:
+        if token.like_num:
+            val = type_func(token.text)
+        if token.text in [">", "<", "="]:
+            rel_op += token.text
+
+    # type (< | <=) val
+    if rel_op in ["<", "<="]:
+        min_ = BoundaryType.NEGATIVE_INFINITY
+        max_ = val
+        if rel_op == "<=":
+            max_incl = True
+
+    # type (> | >=) val
+    elif rel_op in [">", ">="]:
+        min_ = val
+        max_ = BoundaryType.INFINITY
+        if rel_op == ">=":
+            min_incl = True
+
+    return BoundaryType(type_, min=min_, max=max_, min_inclusive=min_incl, max_inclusive=max_incl)
+
+
+def _create_interval_in_brackets_boundary(match_string: Span, type_: str) -> BoundaryType:
+    span_ = match_string[2:-1]
+
+    return _create_interval_boundary(span_, type_)
+
+
+def _analyze_matches(matches: list[tuple[str, Span]], boundaries: BoundaryList) -> None:
+    """Analyze the passed match list for boundaries to be created.
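+
+    Each BOUNDARY_TYPE match is assigned a running id and paired with the
+    non-type match that shares that id; a boundary is created only for
+    complete pairs.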
+ + Parameters + ---------- + matches + Matches found by spaCy Matcher. + + boundaries + BoundaryList object that creates and contains the matching boundary objects. + + """ + type_id = 0 + other_id = 0 + processed_matches = [] + found_type = False + + # Assignment of the found boundaries to the corresponding data type + for match_label, match_string in matches: + if match_label == "BOUNDARY_TYPE": + if found_type: + other_id += 1 + processed_matches.append({"id": type_id, "match_label": match_label, "match_string": match_string}) + type_id += 1 + found_type = True + + else: + processed_matches.append({"id": other_id, "match_label": match_label, "match_string": match_string}) + other_id += 1 + if found_type: + found_type = False + + # Creation of the matching BoundaryTypes + for i in range(max(type_id, other_id)): + same_id = [match for match in processed_matches if match["id"] == i] + if len(same_id) == 2: + type_ = "" + match_string = "" + match_label = "" + + for match in same_id: + if match["match_label"] == "BOUNDARY_TYPE": + type_ = match["match_string"].text + else: + match_label = match["match_label"] + match_string = match["match_string"] + + boundaries.add_boundary(match_label, type_, match_string) + + +def extract_boundary(description: str, type_string: str) -> set[BoundaryType]: + """Extract valid BoundaryTypes. + + Extract valid BoundaryTypes described by predefined rules. + + Parameters + ---------- + description + Description string of the parameter to be examined. + + type_string + Type string of the parameter to be examined. + + Returns + ------- + set[BoundaryType] + A set containing valid BoundaryTypes. + """ + boundaries = BoundaryList() + + type_doc = _nlp(type_string) + type_matches = _matcher(type_doc) + type_matches = [(_nlp.vocab.strings[match_id], type_doc[start:end]) for match_id, start, end in type_matches] + + description_doc = _nlp(description) + desc_matches = _matcher(description_doc) + desc_matches = [(_nlp.vocab.strings[match_id], description_doc[start:end]) for match_id, start, end in desc_matches] + + if type_matches: + type_list = [] # Possible numeric data types that may be used with the parameter to be examined. + restriction_list = [] # Restrictions of the type such as non-negative + match_label = "" + + for match in type_matches: + if match[0] == "BOUNDARY_TYPE": + type_list.append(match[1].text) + else: + restriction_list.append(match) + + type_length = len(type_list) + + # If the length of the found types is 1, the boundary type is described only in the type string + # and the value range only in the description string. 
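+        # (e.g. the type string "float" with the description "in the range [0, 1]")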
+ + if type_length == 1: + type_text = type_list[0] + match_string: Span | None = None + + if len(restriction_list) == 1: + match_label = restriction_list[0][0] + match_string = restriction_list[0][1] + + # Checking the description for boundaries if no restriction was found in the type string + elif len(desc_matches) > 0: + match_label, match_string = desc_matches[0] + if match_label == "BOUNDARY_TYPE": + type_text = match_string.text + match_label, match_string = desc_matches[1] + + boundaries.add_boundary(match_label, type_text, match_string) + + elif type_length > 1: + found_type_rel_val = any(match[0] == "BOUNDARY_TYPE_REL_VAL" for match in type_matches) + + if found_type_rel_val: + _analyze_matches(type_matches, boundaries) + else: + _analyze_matches(desc_matches, boundaries) + + return boundaries.get_boundaries() diff --git a/src/library_analyzer/processing/api/_extract_valid_values.py b/src/library_analyzer/processing/api/_extract_valid_values.py index 13050272..70f420d2 100644 --- a/src/library_analyzer/processing/api/_extract_valid_values.py +++ b/src/library_analyzer/processing/api/_extract_valid_values.py @@ -1,203 +1,257 @@ -import re -from dataclasses import dataclass, field +from dataclasses import dataclass +from typing import Any +import spacy +from spacy import Language +from spacy.matcher import Matcher +from spacy.tokens import Doc, Span -@dataclass -class Configuration: - _function_list: list = field(default_factory=list) +_enum_valid_values_are = [{"LOWER": "valid"}, {"LOWER": "values"}, {"LOWER": "are"}] - def get_function_list(self) -> list: - return self._function_list +_enum_when_set_to = [{"LOWER": "when"}, {"LOWER": "set"}, {"LOWER": "to"}] +_enum_if_listing = [{"LOWER": "if"}, {"ORTH": {"IN": [",", '"']}, "OP": "?"}] -@dataclass -class DescriptionStringConfiguration(Configuration): - if_listings: bool = True - indented_listings: bool = True - when_set_to: bool = True +_enum_type_curly = [{"ORTH": "{"}, {"OP": "+"}, {"ORTH": "}"}] - def __post_init__(self) -> None: - if self.if_listings: - self._function_list.append(_extract_from_description_if_listing) - if self.indented_listings: - self._function_list.append(_extract_from_description_indented_listing) - if self.when_set_to: - self._function_list.append(_extract_from_description_when_set_to) +_enum_str = [{"LOWER": "str"}] +_enum_single_val_bool_none = [{"ORTH": {"IN": ["True", "False", "None"]}}, {"ORTH": ":"}] -@dataclass -class TypeStringConfiguration(Configuration): - curly_enum: bool = True - and_or_enum: bool = True - - def __post_init__(self) -> None: - if self.curly_enum: - self._function_list.append(_extract_from_type_curly_enum) - if self.and_or_enum: - self._function_list.append(_extract_from_type_listing) +_enum_single_val_quoted = [{"ORTH": {"IN": ["'", '"']}}, {"OP": "+"}, {"ORTH": {"IN": ["'", '"']}}, {"ORTH": ":"}] -def extract_valid_literals(param_description: str, param_type: str) -> set[str]: - """ - Extract all valid literals from the type and description string. +@dataclass +class MatcherConfiguration: + _nlp: Language = None + _matcher: Matcher = None - Parameters - ---------- - param_description: str - Description string of the parameter to be examined. + # Rules to be checked + when_set_to: bool = True + valid_values_are: bool = True + type_curly: bool = True + if_listings: bool = True + single_vals: bool = True - param_type: str - Type string of the parameter to be examined. 
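+    # Each enabled flag registers its corresponding matcher rule in __post_init__.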
+ def __post_init__(self) -> None: + self._nlp = spacy.load("en_core_web_sm") + self._matcher = Matcher(self._nlp.vocab) + self._matcher.add("ENUM_STR", [_enum_str]) - Returns - ------- - set[str] - A set of valid, extracted values of the parameter to be examined. - """ - description_config: DescriptionStringConfiguration = DescriptionStringConfiguration() - type_config: TypeStringConfiguration = TypeStringConfiguration() - none_and_bool = {"False", "None", "True"} + if self.when_set_to: + self._matcher.add("ENUM_SINGLE_VAL", [_enum_when_set_to], on_match=_extract_single_value) + if self.if_listings: + self._matcher.add("ENUM_SINGLE_VAL", [_enum_if_listing], on_match=_extract_single_value) + if self.valid_values_are: + self._matcher.add("ENUM_VALID_VALUES_ARE", [_enum_valid_values_are], on_match=_extract_list) + if self.type_curly: + self._matcher.add("ENUM_TYPE_CURLY", [_enum_type_curly], on_match=_extract_list) + if self.single_vals: + self._matcher.add( + "ENUM_SINGLE_VALS", + [_enum_single_val_quoted, _enum_single_val_bool_none], + on_match=_extract_indented_single_value, + ) - def _execute_pattern(string: str, config: Configuration) -> set[str]: - # Function to execute all pattern functions from config - result = set() - for pattern_function in config.get_function_list(): - result.update(pattern_function(string)) - return result + def get_matcher(self) -> Matcher: + return self._matcher - matches = _execute_pattern(param_type, type_config) + def get_nlp(self) -> Language: + return self._nlp - description_matches = _execute_pattern(param_description, description_config) - # Check if there are matching values in the description that are not True, False or None - # when 'str' occurs in the type string. If this is not the case, unlistable_str is returned as a 'valid' value. - if description_matches: - matches.update(description_matches) - if "str" in matches: - if not description_matches.difference(none_and_bool): - matches.add("unlistable_str") - matches.remove("str") +_extracted = [] - return matches +def _extract_list( + nlp_matcher: Matcher, # noqa: ARG001 + doc: Doc, + i: int, # noqa: ARG001 + nlp_matches: list[tuple[Any, ...]], # noqa: ARG001 +) -> Any | None: + """on-match function for the spaCy Matcher. -def _extract_from_type_curly_enum(type_string: str) -> set[str]: - """ - Extract all valid values of the parameter type string to be examined that were enclosed in curly braces. + Extract the first collection of valid string values that occurs after the matched string. Parameters ---------- - type_string: str - Type string of the parameter to be examined. + nlp_matcher + Parameter is ignored. + doc + Doc object that is checked for the active rules. + i + Parameter is ignored. + nlp_matches + List of matches found by the matcher - Returns - ------- - set[str] - A set of valid values from the parameter description to be examined. 
""" - matches = re.findall(r"\{(.*?)}", type_string) - extracted = [] - - for match in matches: - splitted = re.split(r", ", match) - extracted.extend(splitted) - - return set(extracted) + found_list = False + found_minus = False + ex = [] + + for token in doc: + if token.text in ["[", "{"]: + found_list = True + elif token.text in ["]", "}"]: + break + + if found_list and token.text not in ["'", '"', ",", "[", "{", "}"]: + if token.text in ["True", "False"]: + ex.append("True") + ex.append("False") + else: + if token.text in ["-", "_", " "]: + found_minus = True + ex[len(ex) - 1] += token.text + continue + elif found_minus: + ex[len(ex) - 1] += token.text + found_minus = False + continue + + ex.append(token.text) + + ex = ['"' + x + '"' for x in ex] + _extracted.extend(ex) + + return None + + +def _extract_single_value( + nlp_matcher: Matcher, # noqa: ARG001 + doc: Doc, + i: int, + nlp_matches: list[tuple[Any, ...]], +) -> Any | None: + """on-match function for the spaCy Matcher. + + Extract the first value that occurs after the matched string. + Parameters + ---------- + nlp_matcher + Parameter is ignored. + doc + Doc object that is checked for the active rules. + i + Index of the match that was recognized by the rule. + nlp_matches + List of matches found by the matcher. -def _extract_from_type_listing(type_string: str) -> set[str]: """ - Extract all valid values from the listing of the parameter type string to be examined. + _, _, end = nlp_matches[i] + next_token = doc[end] + text = "" + + if next_token.text in ["True", "False"]: + _extracted.append("True") + _extracted.append("False") + elif next_token.text == "None": + _extracted.append("None") + elif next_token.text in ["'", '"']: + for token in doc[end + 1 :]: + if token.text in ["'", '"']: + break + else: + text += token.text + _extracted.append('"' + text + '"') + + return None + + +def _extract_indented_single_value( + nlp_matcher: Matcher, # noqa: ARG001 + doc: Doc, + i: int, + nlp_matches: list[tuple[Any, ...]], +) -> Any | None: + """on-match function for the spaCy Matcher. + + Extract the standalone indented values. Parameters ---------- - type_string: str - Type string of the parameter to be examined. + nlp_matcher + Parameter is ignored. + doc + Doc object that is checked for the active rules. + i + Index of the match that was recognized by the rule. + nlp_matches + List of matches found by the matcher. - Returns - ------- - set[str] - A set of valid values from the parameter description to be examined. """ - # Multiple values seperated by ',', 'and' or 'or' with single# quotes - single_and_or_pattern = r"('[^']*'|bool|str)\s*(?:and|or|,)?" - # Multiple values seperated by ',', 'and' or'or' with double quotes - double_and_or_pattern = r"(\"[^\"]*\"|bool|str)\s*(?:and|or|,)?" + _, start, end = nlp_matches[i] + value = doc[start : end - 1] + value = value.text - matches = re.findall(single_and_or_pattern, type_string) + if value[0] in ["'", '"']: + value = value.replace("'", '"') - if not matches: - matches = re.findall(double_and_or_pattern, type_string) + _extracted.append(value) - extracted = set(matches) + return None - if "bool" in extracted: - extracted.remove("bool") - extracted.add("False") - extracted.add("True") - return extracted - - -def _extract_from_description_if_listing(description: str) -> set[str]: - """Extract the 'if listing' pattern. - - Detect all substrings starting with 'if' and satisfying one of the following cases: - A value between single or double quotes, False, True, or None. 
+def _nlp_matches_to_readable_matches(
+    nlp_matches: list[tuple[int, int, int]],
+    nlp_: Language,
+    doc_: Doc,
+) -> list[tuple[str, Span]]:
+    """Transform the matches list into a readable list.

     Parameters
     ----------
-    description: str
-        Description string of the parameter to be examined.
+    nlp_matches
+        List of spaCy matches.
+    nlp_
+        spaCy natural language pipeline.
+    doc_
+        Doc object that is checked for the active rules.

-    Returns
-    -------
-    set[str]
-        A set of valid values from the parameter description to be examined.
     """
-    pattern = r"[-\+\*]?\s*If\s*('[^']*'|\"[^\"]*\"|True|False|None)"
-    matches = re.findall(pattern, description)
-    return set(matches)
+    return [(nlp_.vocab.strings[match_id], doc_[start:end]) for match_id, start, end in nlp_matches]


-def _extract_from_description_indented_listing(description: str) -> set[str]:
-    """Extract the 'indented listing' pattern.
-
-    Detect all substrings that appear in an indented list and match one of the following cases:
-    A value between single or double quotes, False, True, or None.
+def extract_valid_literals(description: str, type_string: str) -> set[str]:
+    """Extract all valid literals.

     Parameters
     ----------
-    description: str
+    description
         Description string of the parameter to be examined.
+    type_string
+        Type string of the parameter to be examined.

     Returns
     -------
     set[str]
-        A set of valid values from the parameter description to be examined.
+        Set of extracted literals.
+
     """
-    pattern = r"[-\+\*]?\s+(\"[^\"]*\"|'[^']*'|None|True|False):"
-    matches = re.findall(pattern, description)
-    return set(matches)
+    _extracted.clear()


-def _extract_from_description_when_set_to(description: str) -> set[str]:
-    """Extract the 'when set to' pattern.
+    nlp = MATCHER_CONFIG.get_nlp()
+    matcher = MATCHER_CONFIG.get_matcher()

-    Detect all substrings starting with 'when set to' and satisfying one of the following cases:
-    A value between single or double quotes, False, True, or None.
+    none_and_bool = {"False", "None", "True"}

-    Parameters
-    ----------
-    Description string of the parameter to be examined.
+    desc_doc = nlp.make_doc(" ".join(description.split()))
+    type_doc = nlp.make_doc(type_string)

-    Returns
-    -------
-    set[str]
-    A set of valid literals from the parameter description to be examined.
+    matcher(desc_doc)
- """ - pattern = r"When set to (\"[^\"]*\"|'[^']*'|None|True|False)" - matches = re.findall(pattern, description, re.IGNORECASE) - return set(matches) + type_matches = matcher(type_doc) + type_matches = _nlp_matches_to_readable_matches(type_matches, nlp, type_doc) + + extracted_set = set(_extracted) + + if any(x[0] == "ENUM_STR" for x in type_matches) and not extracted_set.difference(none_and_bool): + extracted_set.add("unlistable_str") + + return extracted_set + + +MATCHER_CONFIG = MatcherConfiguration() diff --git a/src/library_analyzer/processing/api/_get_instance_attributes.py b/src/library_analyzer/processing/api/_get_instance_attributes.py index d98d9d88..a168c615 100644 --- a/src/library_analyzer/processing/api/_get_instance_attributes.py +++ b/src/library_analyzer/processing/api/_get_instance_attributes.py @@ -7,7 +7,7 @@ from library_analyzer.processing.api.model import Attribute, NamedType, UnionType -def get_instance_attributes(class_node: astroid.ClassDef) -> list[Attribute]: +def get_instance_attributes(class_node: astroid.ClassDef, class_id: str) -> list[Attribute]: attributes = [] for name, assignments in class_node.instance_attrs.items(): types = set() @@ -25,7 +25,13 @@ def get_instance_attributes(class_node: astroid.ClassDef) -> list[Attribute]: except astroid.InferenceError: pass - if isinstance(assignment, astroid.AssignAttr) and isinstance(assignment.parent, astroid.Assign): + if isinstance(assignment, astroid.AssignAttr) and isinstance(assignment.parent, astroid.AnnAssign): + annotation = assignment.parent.annotation + if annotation is not None and isinstance(annotation, astroid.Attribute | Name | Subscript): + types_, remove_types_ = get_type_from_type_hint(annotation) + types = types.union(types_) + remove_types = remove_types.union(remove_types_) + elif isinstance(assignment, astroid.AssignAttr) and isinstance(assignment.parent, astroid.Assign): attribute_type = _get_type_of_attribute(next(astroid.inference.infer_attribute(self=assignment))) if attribute_type is not None: types.add(attribute_type) @@ -46,47 +52,52 @@ def get_instance_attributes(class_node: astroid.ClassDef) -> list[Attribute]: and init_function.args.args[i].name == parameter_name ): type_hint = init_function.args.annotations[i] - if type_hint is not None: - if isinstance(type_hint, Name): - types.add(type_hint.name) - elif isinstance(type_hint, astroid.Attribute): - types.add(type_hint.attrname) - elif ( - isinstance(type_hint, Subscript) - and isinstance(type_hint.value, Name) - and isinstance(type_hint.slice, Name) - ): - value = type_hint.value.name - slice_name = type_hint.slice.name - if value == "Optional": - types.add("NoneType") - types.add(slice_name) - else: - types.add(value + "[" + slice_name + "]") - remove_types.add(value) - remove_types.add(value.lower()) - elif ( - isinstance(type_hint, Subscript) - and isinstance(type_hint.value, Name) - and isinstance(type_hint.slice, astroid.Tuple) - and type_hint.value.name == "Union" - ): - for type_name in type_hint.slice.elts: - if isinstance(type_name, Name): - types.add(type_name.name) - remove_types.add(type_hint.value.name) - remove_types.add(type_hint.value.name.lower()) + if type_hint is not None and isinstance(type_hint, Attribute | Name | Subscript): + types_, remove_types_ = get_type_from_type_hint(type_hint) + types = types.union(types_) + remove_types = remove_types.union(remove_types_) break types = types - remove_types if len(types) == 1: - attributes.append(Attribute(name, NamedType(types.pop()))) + 
attributes.append(Attribute(f"{class_id}/{name}", name, NamedType(types.pop()))) elif len(types) > 1: - attributes.append(Attribute(name, UnionType([NamedType(type_) for type_ in types]))) + attributes.append(Attribute(f"{class_id}/{name}", name, UnionType([NamedType(type_) for type_ in types]))) else: - attributes.append(Attribute(name, None)) + attributes.append(Attribute(f"{class_id}/{name}", name, None)) return attributes +def get_type_from_type_hint(type_hint: astroid.Attribute | Name | Subscript) -> tuple[set, set]: + types = set() + remove_types = set() + if isinstance(type_hint, Name): + types.add(type_hint.name) + elif isinstance(type_hint, astroid.Attribute): + types.add(type_hint.attrname) + elif isinstance(type_hint, Subscript) and isinstance(type_hint.value, Name) and isinstance(type_hint.slice, Name): + value = type_hint.value.name + slice_name = type_hint.slice.name + if value == "Optional": + types.add("NoneType") + types.add(slice_name) + else: + types.add(value + "[" + slice_name + "]") + remove_types.add(value) + remove_types.add(value.lower()) + elif ( + isinstance(type_hint, Subscript) + and isinstance(type_hint.value, Name) + and isinstance(type_hint.slice, astroid.Tuple) + and type_hint.value.name == "Union" + ): + for type_name in type_hint.slice.elts: + if isinstance(type_name, Name): + types.add(type_name.name) + remove_types.add(type_hint.value.name) + remove_types.add(type_hint.value.name.lower()) + return types, remove_types + + def _get_type_of_attribute(infered_value: Any) -> str | None: if infered_value == astroid.Uninferable: return None diff --git a/src/library_analyzer/processing/api/_get_parameter_list.py b/src/library_analyzer/processing/api/_get_parameter_list.py index 9e908d74..98f76143 100644 --- a/src/library_analyzer/processing/api/_get_parameter_list.py +++ b/src/library_analyzer/processing/api/_get_parameter_list.py @@ -27,7 +27,7 @@ def get_parameter_list( default_value=_get_stringified_default_value(function_node, parameter_name), assigned_by=parameter_assigned_by, is_public=function_is_public, - documentation=docstring_parser.get_parameter_documentation( + docstring=docstring_parser.get_parameter_documentation( function_node, parameter_name, parameter_assigned_by, diff --git a/src/library_analyzer/processing/api/_resolve_references.py b/src/library_analyzer/processing/api/_resolve_references.py new file mode 100644 index 00000000..4368f82c --- /dev/null +++ b/src/library_analyzer/processing/api/_resolve_references.py @@ -0,0 +1,225 @@ +from __future__ import annotations + +from dataclasses import dataclass, field + +import astroid + +from library_analyzer.processing.api.model import Expression, Reference +from library_analyzer.utils import ASTWalker + + +@dataclass +class MemberAccess(Expression): + expression: astroid.NodeNG + value: MemberAccess | Reference + parent: astroid.NodeNG | None = field(default=None) + + +@dataclass +class ScopeNode: + """Represents a node in the scope tree. + + The scope tree is a tree that represents the scope of a module. It is used to determine the scope of a reference. + On the top level, there is a ScopeNode for the module. Each ScopeNode has a list of children, which are the nodes + that are defined in the scope of the node. Each ScopeNode also has a reference to its parent node. + + Attributes + ---------- + node is the node in the AST that defines the scope of the node. + children is a list of ScopeNodes that are defined in the scope of the node, is None if the node is a leaf node. 
+    parent is the parent node in the scope tree, is None if the node is the root node.
+    """
+
+    node: astroid.Module | astroid.FunctionDef | astroid.ClassDef | astroid.AssignName | astroid.AssignAttr | astroid.Attribute | astroid.Call | astroid.Import | astroid.ImportFrom | MemberAccess
+    children: list[ScopeNode | ClassScopeNode]
+    parent: ScopeNode | ClassScopeNode | None = None
+
+
+@dataclass
+class ClassScopeNode(ScopeNode):
+    """Represents a ScopeNode that defines the scope of a class.
+
+    Attributes
+    ----------
+    class_variables is a list of AssignName nodes that define class variables
+    instance_variables is a list of AssignAttr nodes that define instance variables
+    """
+
+    class_variables: list[astroid.AssignName] = field(default_factory=list)
+    instance_variables: list[astroid.AssignAttr] = field(default_factory=list)
+
+
+@dataclass
+class ScopeFinder:
+    """
+    A ScopeFinder instance is used to find the scope of a reference.
+
+    The scope of a reference is the node in the scope tree that defines the reference.
+    It is determined by walking the AST and checking if the reference is defined in the scope of the current node.
+
+    Attributes
+    ----------
+    current_node_stack: Stack of nodes that are currently being visited by the ASTWalker.
+    children: All found child nodes are stored here until their scope is determined.
+    """
+
+    current_node_stack: list[ScopeNode | ClassScopeNode] = field(default_factory=list)
+    children: list[ScopeNode | ClassScopeNode] = field(default_factory=list)
+
+    def get_node_by_name(self, name: str) -> ScopeNode | ClassScopeNode | None:
+        """
+        Get a ScopeNode by its name.
+
+        Parameters
+        ----------
+        name is the name of the node that should be found.
+
+        Returns
+        -------
+        The ScopeNode with the given name, or None if no node with the given name was found.
+        """
+        for node in self.current_node_stack:
+            if node.node.name == name:
+                return node
+        return None
+        # TODO: this is inefficient, instead use a dict to store the nodes
+
+    def detect_scope(self, node: astroid.NodeNG) -> None:
+        """
+        Detect the scope of the given node.
+
+        Detecting the scope of a node means finding the node in the scope tree that defines the scope of the given node.
+        The scope of a node is defined by the parent node in the scope tree.
+        """
+        current_scope = node
+        outer_scope_children: list[ScopeNode | ClassScopeNode] = []
+        inner_scope_children: list[ScopeNode | ClassScopeNode] = []
+        for child in self.children:
+            if (
+                child.parent is not None and child.parent.node != current_scope
+            ):  # the child belongs to an outer scope, not to the current node
+                outer_scope_children.append(child)  # add the child to the outer scope
+            else:
+                inner_scope_children.append(child)  # add the child to the inner scope
+
+        self.current_node_stack[-1].children = inner_scope_children  # set the children of the current node
+        self.children = outer_scope_children  # keep the children that are not in the scope of the current node
+        self.children.append(self.current_node_stack[-1])  # add the current node to the children
+        self.current_node_stack.pop()  # remove the current node from the stack
+
+    def analyze_constructor(self, node: astroid.FunctionDef) -> None:
+        """Analyze the constructor of a class.
+
+        The constructor of a class is a special function that is called when an instance of the class is created.
+        This function is only called when the name of the FunctionDef node is `__init__`.
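+
+        For example, given ``def __init__(self): self.x = 1``, the ``AssignAttr`` node
+        for ``self.x`` is appended to the enclosing class's instance_variables.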
+ """ + # add instance variables to the instance_variables list of the class + for child in node.body: + class_node = self.get_node_by_name(node.parent.name) + + if isinstance(class_node, ClassScopeNode): + if isinstance(child, astroid.Assign): + class_node.instance_variables.append(child.targets[0]) + elif isinstance(child, astroid.AnnAssign): + class_node.instance_variables.append(child.target) + else: + raise TypeError(f"Unexpected node type {type(child)}") + + def enter_module(self, node: astroid.Module) -> None: + """ + Enter a module node. + + The module node is the root node, so it has no parent (parent is None). + The module node is also the first node that is visited, so the current_node_stack is empty before entering the module node. + """ + self.current_node_stack.append( + ScopeNode(node=node, children=[], parent=None), + ) + + def leave_module(self, node: astroid.Module) -> None: + self.detect_scope(node) + + def enter_classdef(self, node: astroid.ClassDef) -> None: + self.current_node_stack.append( + ClassScopeNode( + node=node, + children=[], + parent=self.current_node_stack[-1], + instance_variables=[], + class_variables=[], + ), + ) + + def leave_classdef(self, node: astroid.ClassDef) -> None: + self.detect_scope(node) + + def enter_functiondef(self, node: astroid.FunctionDef) -> None: + self.current_node_stack.append( + ScopeNode(node=node, children=[], parent=self.current_node_stack[-1]), + ) + if node.name == "__init__": + self.analyze_constructor(node) + + def leave_functiondef(self, node: astroid.FunctionDef) -> None: + self.detect_scope(node) + + def enter_assignname(self, node: astroid.AssignName) -> None: + if isinstance(node.parent, astroid.Arguments) and node.name == "self": + pass # TODO: Special treatment for self parameter + + elif isinstance( + node.parent, + astroid.Assign + | astroid.Arguments + | astroid.AssignAttr + | astroid.Attribute + | astroid.AugAssign + | astroid.AnnAssign, + ): + parent = self.current_node_stack[-1] + scope_node = ScopeNode(node=node, children=[], parent=parent) + self.children.append(scope_node) + + # add class variables to the class_variables list of the class + if isinstance(node.parent.parent, astroid.ClassDef): + class_node = self.get_node_by_name(node.parent.parent.name) + if isinstance(class_node, ClassScopeNode): + class_node.class_variables.append(node) + + def enter_assignattr(self, node: astroid.AssignAttr) -> None: + parent = self.current_node_stack[-1] + scope_node = ScopeNode(node=node, children=[], parent=parent) + self.children.append(scope_node) + + def enter_import(self, node: astroid.Import) -> None: + parent = self.current_node_stack[-1] + scope_node = ScopeNode(node=node, children=[], parent=parent) + self.children.append(scope_node) + + def enter_importfrom(self, node: astroid.ImportFrom) -> None: + parent = self.current_node_stack[-1] + scope_node = ScopeNode(node=node, children=[], parent=parent) + self.children.append(scope_node) + + +def get_scope(code: str) -> list[ScopeNode | ClassScopeNode]: + """Get the scope of the given code. + + In order to get the scope of the given code, the code is parsed into an AST and then walked by an ASTWalker. + The ASTWalker detects the scope of each node and builds a scope tree by using an instance of ScopeFinder. + + Returns + ------- + scopes: list of ScopeNode instances that represent the scope tree of the given code. + variables: list of class variables and list of instance variables for all classes in the given code. 
+ """ + scope_handler = ScopeFinder() + walker = ASTWalker(scope_handler) + module = astroid.parse(code) + walker.walk(module) + + scopes = scope_handler.children # get the children of the root node, which are the scopes of the module + scope_handler.children = [] # reset the children + scope_handler.current_node_stack = [] # reset the stack + + return scopes diff --git a/src/library_analyzer/processing/api/docstring_parsing/__init__.py b/src/library_analyzer/processing/api/docstring_parsing/__init__.py index af8c8a42..9d63bdee 100644 --- a/src/library_analyzer/processing/api/docstring_parsing/__init__.py +++ b/src/library_analyzer/processing/api/docstring_parsing/__init__.py @@ -1,11 +1,13 @@ """Parsing docstrings into a common format.""" -from ._abstract_documentation_parser import AbstractDocstringParser +from ._abstract_docstring_parser import AbstractDocstringParser from ._create_docstring_parser import create_docstring_parser from ._docstring_style import DocstringStyle from ._epydoc_parser import EpydocParser +from ._googledoc_parser import GoogleDocParser from ._numpydoc_parser import NumpyDocParser from ._plaintext_docstring_parser import PlaintextDocstringParser +from ._restdoc_parser import RestDocParser __all__ = [ "AbstractDocstringParser", @@ -13,5 +15,7 @@ "DocstringStyle", "EpydocParser", "NumpyDocParser", + "GoogleDocParser", + "RestDocParser", "PlaintextDocstringParser", ] diff --git a/src/library_analyzer/processing/api/docstring_parsing/_abstract_documentation_parser.py b/src/library_analyzer/processing/api/docstring_parsing/_abstract_docstring_parser.py similarity index 78% rename from src/library_analyzer/processing/api/docstring_parsing/_abstract_documentation_parser.py rename to src/library_analyzer/processing/api/docstring_parsing/_abstract_docstring_parser.py index 5e9fbc4e..5cf86d51 100644 --- a/src/library_analyzer/processing/api/docstring_parsing/_abstract_documentation_parser.py +++ b/src/library_analyzer/processing/api/docstring_parsing/_abstract_docstring_parser.py @@ -7,20 +7,20 @@ import astroid from library_analyzer.processing.api.model import ( - ClassDocumentation, - FunctionDocumentation, + ClassDocstring, + FunctionDocstring, ParameterAssignment, - ParameterDocumentation, + ParameterDocstring, ) class AbstractDocstringParser(ABC): @abstractmethod - def get_class_documentation(self, class_node: astroid.ClassDef) -> ClassDocumentation: + def get_class_documentation(self, class_node: astroid.ClassDef) -> ClassDocstring: pass @abstractmethod - def get_function_documentation(self, function_node: astroid.FunctionDef) -> FunctionDocumentation: + def get_function_documentation(self, function_node: astroid.FunctionDef) -> FunctionDocstring: pass @abstractmethod @@ -29,5 +29,5 @@ def get_parameter_documentation( function_node: astroid.FunctionDef, parameter_name: str, parameter_assigned_by: ParameterAssignment, - ) -> ParameterDocumentation: + ) -> ParameterDocstring: pass diff --git a/src/library_analyzer/processing/api/docstring_parsing/_create_docstring_parser.py b/src/library_analyzer/processing/api/docstring_parsing/_create_docstring_parser.py index c2d49741..21eb1f01 100644 --- a/src/library_analyzer/processing/api/docstring_parsing/_create_docstring_parser.py +++ b/src/library_analyzer/processing/api/docstring_parsing/_create_docstring_parser.py @@ -4,17 +4,23 @@ from ._docstring_style import DocstringStyle from ._epydoc_parser import EpydocParser +from ._googledoc_parser import GoogleDocParser from ._numpydoc_parser import NumpyDocParser from 
._plaintext_docstring_parser import PlaintextDocstringParser +from ._restdoc_parser import RestDocParser if TYPE_CHECKING: - from ._abstract_documentation_parser import AbstractDocstringParser + from ._abstract_docstring_parser import AbstractDocstringParser def create_docstring_parser(style: DocstringStyle) -> AbstractDocstringParser: - if style == DocstringStyle.NUMPY: - return NumpyDocParser() if style == DocstringStyle.EPYDOC: return EpydocParser() - else: # TODO: cover other cases + if style == DocstringStyle.GOOGLE: + return GoogleDocParser() + if style == DocstringStyle.NUMPY: + return NumpyDocParser() + if style == DocstringStyle.REST: + return RestDocParser() + else: return PlaintextDocstringParser() diff --git a/src/library_analyzer/processing/api/docstring_parsing/_docstring_style.py b/src/library_analyzer/processing/api/docstring_parsing/_docstring_style.py index 6edb137d..474749b9 100644 --- a/src/library_analyzer/processing/api/docstring_parsing/_docstring_style.py +++ b/src/library_analyzer/processing/api/docstring_parsing/_docstring_style.py @@ -5,11 +5,11 @@ class DocstringStyle(Enum): # AUTO = "auto", - PLAINTEXT = ("plaintext",) - # REST = "reST", - NUMPY = ("numpy",) - # GOOGLE = "google", + PLAINTEXT = "plaintext" EPYDOC = "epydoc" + GOOGLE = "google" + NUMPY = "numpy" + REST = "rest" def __str__(self) -> str: return self.name diff --git a/src/library_analyzer/processing/api/docstring_parsing/_epydoc_parser.py b/src/library_analyzer/processing/api/docstring_parsing/_epydoc_parser.py index e95d0932..361b0825 100644 --- a/src/library_analyzer/processing/api/docstring_parsing/_epydoc_parser.py +++ b/src/library_analyzer/processing/api/docstring_parsing/_epydoc_parser.py @@ -3,13 +3,13 @@ from docstring_parser import parse as parse_docstring from library_analyzer.processing.api.model import ( - ClassDocumentation, - FunctionDocumentation, + ClassDocstring, + FunctionDocstring, ParameterAssignment, - ParameterDocumentation, + ParameterDocstring, ) -from ._abstract_documentation_parser import AbstractDocstringParser +from ._abstract_docstring_parser import AbstractDocstringParser from ._helpers import get_description, get_full_docstring @@ -24,20 +24,20 @@ def __init__(self) -> None: self.__cached_function_node: astroid.FunctionDef | None = None self.__cached_docstring: DocstringParam | None = None - def get_class_documentation(self, class_node: astroid.ClassDef) -> ClassDocumentation: + def get_class_documentation(self, class_node: astroid.ClassDef) -> ClassDocstring: docstring = get_full_docstring(class_node) docstring_obj = parse_docstring(docstring, style=DocstringStyle.EPYDOC) - return ClassDocumentation( + return ClassDocstring( description=get_description(docstring_obj), full_docstring=docstring, ) - def get_function_documentation(self, function_node: astroid.FunctionDef) -> FunctionDocumentation: + def get_function_documentation(self, function_node: astroid.FunctionDef) -> FunctionDocstring: docstring = get_full_docstring(function_node) docstring_obj = self.__get_cached_function_numpydoc_string(function_node, docstring) - return FunctionDocumentation( + return FunctionDocstring( description=get_description(docstring_obj), full_docstring=docstring, ) @@ -47,7 +47,7 @@ def get_parameter_documentation( function_node: astroid.FunctionDef, parameter_name: str, parameter_assigned_by: ParameterAssignment, # noqa: ARG002 - ) -> ParameterDocumentation: + ) -> ParameterDocstring: # For constructors (__init__ functions) the parameters are described on the class if 
function_node.name == "__init__" and isinstance(function_node.parent, astroid.ClassDef): docstring = get_full_docstring(function_node.parent) @@ -60,10 +60,10 @@ def get_parameter_documentation( matching_parameters_numpydoc = [it for it in all_parameters_numpydoc if it.arg_name == parameter_name] if len(matching_parameters_numpydoc) == 0: - return ParameterDocumentation(type="", default_value="", description="") + return ParameterDocstring(type="", default_value="", description="") last_parameter_docstring_obj = matching_parameters_numpydoc[-1] - return ParameterDocumentation( + return ParameterDocstring( type=last_parameter_docstring_obj.type_name or "", default_value=last_parameter_docstring_obj.default or "", description=last_parameter_docstring_obj.description, diff --git a/src/library_analyzer/processing/api/docstring_parsing/_googledoc_parser.py b/src/library_analyzer/processing/api/docstring_parsing/_googledoc_parser.py new file mode 100644 index 00000000..7665d10b --- /dev/null +++ b/src/library_analyzer/processing/api/docstring_parsing/_googledoc_parser.py @@ -0,0 +1,135 @@ +import astroid +from docstring_parser import Docstring, DocstringParam, DocstringStyle +from docstring_parser import parse as parse_docstring + +from library_analyzer.processing.api.model import ( + AttributeAssignment, + AttributeDocstring, + ClassDocstring, + FunctionDocstring, + ParameterAssignment, + ParameterDocstring, + ResultDocstring, +) + +from ._abstract_docstring_parser import AbstractDocstringParser +from ._helpers import get_description, get_full_docstring + + +class GoogleDocParser(AbstractDocstringParser): + """ + Parses documentation in the Googledoc format. See https://google.github.io/styleguide/pyguide.html#381-docstrings for more information. + + This class is not thread-safe. Each thread should create its own instance. 
+ """ + + def __init__(self) -> None: + self.__cached_function_node: astroid.FunctionDef | None = None + self.__cached_docstring: DocstringParam | None = None + + def get_class_documentation(self, class_node: astroid.ClassDef) -> ClassDocstring: + docstring = get_full_docstring(class_node) + docstring_obj = parse_docstring(docstring, style=DocstringStyle.GOOGLE) + + return ClassDocstring( + description=get_description(docstring_obj), + full_docstring=docstring, + ) + + def get_function_documentation(self, function_node: astroid.FunctionDef) -> FunctionDocstring: + docstring = get_full_docstring(function_node) + docstring_obj = self.__get_cached_function_googledoc_string(function_node, docstring) + + return FunctionDocstring( + description=get_description(docstring_obj), + full_docstring=docstring, + ) + + def get_parameter_documentation( + self, + function_node: astroid.FunctionDef, + parameter_name: str, + parameter_assigned_by: ParameterAssignment, # noqa: ARG002 + ) -> ParameterDocstring: + # For constructors (__init__ functions) the parameters are described on the class + if function_node.name == "__init__" and isinstance(function_node.parent, astroid.ClassDef): + docstring = get_full_docstring(function_node.parent) + else: + docstring = get_full_docstring(function_node) + + # Find matching parameter docstrings + function_googledoc = self.__get_cached_function_googledoc_string(function_node, docstring) + all_parameters_googledoc: list[DocstringParam] = function_googledoc.params + matching_parameters_googledoc = [ + it for it in all_parameters_googledoc if it.arg_name == parameter_name and it.args[0] == "param" + ] + + if len(matching_parameters_googledoc) == 0: + return ParameterDocstring(type="", default_value="", description="") + + last_parameter_docstring_obj = matching_parameters_googledoc[-1] + return ParameterDocstring( + type=last_parameter_docstring_obj.type_name or "", + default_value=last_parameter_docstring_obj.default or "", + description=last_parameter_docstring_obj.description, + ) + + def get_attribute_documentation( + self, + function_node: astroid.FunctionDef, + attribute_name: str, + attribute_assigned_by: AttributeAssignment, # noqa: ARG002 + ) -> AttributeDocstring: + # For constructors (__init__ functions) the attributes are described on the class + if function_node.name == "__init__" and isinstance(function_node.parent, astroid.ClassDef): + docstring = get_full_docstring(function_node.parent) + else: + docstring = get_full_docstring(function_node) + + # Find matching attribute docstrings + function_googledoc = self.__get_cached_function_googledoc_string(function_node, docstring) + all_attributes_googledoc: list[DocstringParam] = function_googledoc.params + matching_attributes_googledoc = [ + it for it in all_attributes_googledoc if it.arg_name == attribute_name and it.args[0] == "attribute" + ] + + if len(matching_attributes_googledoc) == 0: + return AttributeDocstring(type="", default_value="", description="") + + last_attribute_docstring_obj = matching_attributes_googledoc[-1] + return AttributeDocstring( + type=last_attribute_docstring_obj.type_name or "", + default_value=last_attribute_docstring_obj.default or "", + description=last_attribute_docstring_obj.description, + ) + + def get_result_documentation(self, function_node: astroid.FunctionDef) -> ResultDocstring: + if function_node.name == "__init__" and isinstance(function_node.parent, astroid.ClassDef): + docstring = get_full_docstring(function_node.parent) + else: + docstring = 
get_full_docstring(function_node) + + # Find matching parameter docstrings + function_googledoc = self.__get_cached_function_googledoc_string(function_node, docstring) + function_returns = function_googledoc.returns + + if function_returns is None: + return ResultDocstring(type="", description="") + + return ResultDocstring(type=function_returns.type_name or "", description=function_returns.description or "") + + def __get_cached_function_googledoc_string(self, function_node: astroid.FunctionDef, docstring: str) -> Docstring: + """ + Return the GoogleDocString for the given function node. + + It is only recomputed when the function node differs from the previous one that was passed to this function. + This avoids reparsing the docstring for the function itself and all of its parameters. + + On Lars's system this caused a significant performance improvement: Previously, 8.382s were spent inside the + function get_parameter_documentation when parsing sklearn. Afterward, it was only 2.113s. + """ + if self.__cached_function_node is not function_node: + self.__cached_function_node = function_node + self.__cached_docstring = parse_docstring(docstring, style=DocstringStyle.GOOGLE) + + return self.__cached_docstring diff --git a/src/library_analyzer/processing/api/docstring_parsing/_numpydoc_parser.py b/src/library_analyzer/processing/api/docstring_parsing/_numpydoc_parser.py index 5a655c41..ca6c63d9 100644 --- a/src/library_analyzer/processing/api/docstring_parsing/_numpydoc_parser.py +++ b/src/library_analyzer/processing/api/docstring_parsing/_numpydoc_parser.py @@ -5,13 +5,13 @@ from docstring_parser import parse as parse_docstring from library_analyzer.processing.api.model import ( - ClassDocumentation, - FunctionDocumentation, + ClassDocstring, + FunctionDocstring, ParameterAssignment, - ParameterDocumentation, + ParameterDocstring, ) -from ._abstract_documentation_parser import AbstractDocstringParser +from ._abstract_docstring_parser import AbstractDocstringParser from ._helpers import get_description, get_full_docstring @@ -32,20 +32,20 @@ def __init__(self) -> None: self.__cached_function_node: astroid.FunctionDef | None = None self.__cached_docstring: Docstring | None = None - def get_class_documentation(self, class_node: astroid.ClassDef) -> ClassDocumentation: + def get_class_documentation(self, class_node: astroid.ClassDef) -> ClassDocstring: docstring = get_full_docstring(class_node) docstring_obj = parse_docstring(docstring, style=DocstringStyle.NUMPYDOC) - return ClassDocumentation( + return ClassDocstring( description=get_description(docstring_obj), full_docstring=docstring, ) - def get_function_documentation(self, function_node: astroid.FunctionDef) -> FunctionDocumentation: + def get_function_documentation(self, function_node: astroid.FunctionDef) -> FunctionDocstring: docstring = get_full_docstring(function_node) docstring_obj = self.__get_cached_function_numpydoc_string(function_node, docstring) - return FunctionDocumentation( + return FunctionDocstring( description=get_description(docstring_obj), full_docstring=docstring, ) @@ -55,7 +55,7 @@ def get_parameter_documentation( function_node: astroid.FunctionDef, parameter_name: str, parameter_assigned_by: ParameterAssignment, - ) -> ParameterDocumentation: + ) -> ParameterDocstring: # For constructors (__init__ functions) the parameters are described on the class if function_node.name == "__init__" and isinstance(function_node.parent, astroid.ClassDef): docstring = get_full_docstring(function_node.parent) @@ -72,11 +72,11 @@ 
def get_parameter_documentation(
     ]

     if len(matching_parameters_numpydoc) == 0:
-        return ParameterDocumentation(type="", default_value="", description="")
+        return ParameterDocstring(type="", default_value="", description="")

     last_parameter_numpydoc = matching_parameters_numpydoc[-1]
     type_, default_value = _get_type_and_default_value(last_parameter_numpydoc)
-    return ParameterDocumentation(
+    return ParameterDocstring(
         type=type_,
         default_value=default_value,
         description=last_parameter_numpydoc.description,
diff --git a/src/library_analyzer/processing/api/docstring_parsing/_plaintext_docstring_parser.py b/src/library_analyzer/processing/api/docstring_parsing/_plaintext_docstring_parser.py
index 9cc2074c..42f75263 100644
--- a/src/library_analyzer/processing/api/docstring_parsing/_plaintext_docstring_parser.py
+++ b/src/library_analyzer/processing/api/docstring_parsing/_plaintext_docstring_parser.py
@@ -1,31 +1,31 @@
 import astroid

 from library_analyzer.processing.api.model import (
-    ClassDocumentation,
-    FunctionDocumentation,
+    ClassDocstring,
+    FunctionDocstring,
     ParameterAssignment,
-    ParameterDocumentation,
+    ParameterDocstring,
 )

-from ._abstract_documentation_parser import AbstractDocstringParser
+from ._abstract_docstring_parser import AbstractDocstringParser
 from ._helpers import get_full_docstring


 class PlaintextDocstringParser(AbstractDocstringParser):
     """Parses documentation in any format. Should not be used if there is another parser for the specific format."""

-    def get_class_documentation(self, class_node: astroid.ClassDef) -> ClassDocumentation:
+    def get_class_documentation(self, class_node: astroid.ClassDef) -> ClassDocstring:
         docstring = get_full_docstring(class_node)

-        return ClassDocumentation(
+        return ClassDocstring(
             description=docstring,
             full_docstring=docstring,
         )

-    def get_function_documentation(self, function_node: astroid.FunctionDef) -> FunctionDocumentation:
+    def get_function_documentation(self, function_node: astroid.FunctionDef) -> FunctionDocstring:
         docstring = get_full_docstring(function_node)

-        return FunctionDocumentation(
+        return FunctionDocstring(
             description=docstring,
             full_docstring=docstring,
         )
@@ -35,5 +35,5 @@ def get_parameter_documentation(
         function_node: astroid.FunctionDef,  # noqa: ARG002
         parameter_name: str,  # noqa: ARG002
         parameter_assigned_by: ParameterAssignment,  # noqa: ARG002
-    ) -> ParameterDocumentation:
-        return ParameterDocumentation()
+    ) -> ParameterDocstring:
+        return ParameterDocstring()
diff --git a/src/library_analyzer/processing/api/docstring_parsing/_restdoc_parser.py b/src/library_analyzer/processing/api/docstring_parsing/_restdoc_parser.py
new file mode 100644
index 00000000..c4d37763
--- /dev/null
+++ b/src/library_analyzer/processing/api/docstring_parsing/_restdoc_parser.py
@@ -0,0 +1,105 @@
+import astroid
+from docstring_parser import Docstring, DocstringParam, DocstringStyle
+from docstring_parser import parse as parse_docstring
+
+from library_analyzer.processing.api.model import (
+    ClassDocstring,
+    FunctionDocstring,
+    ParameterAssignment,
+    ParameterDocstring,
+    ResultDocstring,
+)
+
+from ._abstract_docstring_parser import AbstractDocstringParser
+from ._helpers import get_description, get_full_docstring
+
+
+class RestDocParser(AbstractDocstringParser):
+    """
+    Parses documentation in the reST (reStructuredText) docstring format. See https://peps.python.org/pep-0287/ for more information.
+
+    This class is not thread-safe. Each thread should create its own instance.
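+
+    Example of the docstring format this parser expects::
+
+        :param name: Description of the parameter.
+        :type name: str
+        :return: Description of the return value.
+        :rtype: bool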
+ """ + + def __init__(self) -> None: + self.__cached_function_node: astroid.FunctionDef | None = None + self.__cached_docstring: DocstringParam | None = None + + def get_class_documentation(self, class_node: astroid.ClassDef) -> ClassDocstring: + docstring = get_full_docstring(class_node) + docstring_obj = parse_docstring(docstring, style=DocstringStyle.REST) + + return ClassDocstring( + description=get_description(docstring_obj), + full_docstring=docstring, + ) + + def get_function_documentation(self, function_node: astroid.FunctionDef) -> FunctionDocstring: + docstring = get_full_docstring(function_node) + docstring_obj = self.__get_cached_function_restdoc_string(function_node, docstring) + + return FunctionDocstring( + description=get_description(docstring_obj), + full_docstring=docstring, + ) + + def get_parameter_documentation( + self, + function_node: astroid.FunctionDef, + parameter_name: str, + parameter_assigned_by: ParameterAssignment, # noqa: ARG002 + ) -> ParameterDocstring: + # For constructors (__init__ functions) the parameters are described on the class + if function_node.name == "__init__" and isinstance(function_node.parent, astroid.ClassDef): + docstring = get_full_docstring(function_node.parent) + else: + docstring = get_full_docstring(function_node) + + # Find matching parameter docstrings + function_restdoc = self.__get_cached_function_restdoc_string(function_node, docstring) + all_parameters_restdoc: list[DocstringParam] = function_restdoc.params + matching_parameters_restdoc = [it for it in all_parameters_restdoc if it.arg_name == parameter_name] + + if len(matching_parameters_restdoc) == 0: + return ParameterDocstring(type="", default_value="", description="") + + last_parameter_docstring_obj = matching_parameters_restdoc[-1] + return ParameterDocstring( + type=last_parameter_docstring_obj.type_name or "", + default_value=last_parameter_docstring_obj.default or "", + description=last_parameter_docstring_obj.description, + ) + + def get_result_documentation(self, function_node: astroid.FunctionDef) -> ResultDocstring: + if function_node.name == "__init__" and isinstance(function_node.parent, astroid.ClassDef): + docstring = get_full_docstring(function_node.parent) + else: + docstring = get_full_docstring(function_node) + + # Find matching parameter docstrings + function_restdoc = self.__get_cached_function_restdoc_string(function_node, docstring) + function_returns = function_restdoc.returns + + if function_returns is None: + return ResultDocstring(type="", description="") + + return ResultDocstring( + type=function_returns.type_name or "", + description=function_returns.description or "", + ) + + def __get_cached_function_restdoc_string(self, function_node: astroid.FunctionDef, docstring: str) -> Docstring: + """ + Return the RestDocString for the given function node. + + It is only recomputed when the function node differs from the previous one that was passed to this function. + This avoids reparsing the docstring for the function itself and all of its parameters. + + On Lars's system this caused a significant performance improvement: Previously, 8.382s were spent inside the + function get_parameter_documentation when parsing sklearn. Afterward, it was only 2.113s. 
+ """ + if self.__cached_function_node is not function_node: + self.__cached_function_node = function_node + self.__cached_docstring = parse_docstring(docstring, style=DocstringStyle.REST) + + return self.__cached_docstring diff --git a/src/library_analyzer/processing/api/model/__init__.py b/src/library_analyzer/processing/api/model/__init__.py index 8152578f..23207541 100644 --- a/src/library_analyzer/processing/api/model/__init__.py +++ b/src/library_analyzer/processing/api/model/__init__.py @@ -4,20 +4,23 @@ API, API_SCHEMA_VERSION, Attribute, + AttributeAssignment, Class, FromImport, Function, Import, Module, + Parameter, + ParameterAssignment, Result, - ResultDocstring, ) -from ._documentation import ( - ClassDocumentation, - FunctionDocumentation, - ParameterDocumentation, +from ._docstring import ( + AttributeDocstring, + ClassDocstring, + FunctionDocstring, + ParameterDocstring, + ResultDocstring, ) -from ._parameters import Parameter, ParameterAssignment from ._purity import ( AttributeAccess, BuiltInFunction, @@ -53,11 +56,13 @@ "AbstractType", "Attribute", "AttributeAccess", + "AttributeAssignment", + "AttributeDocstring", "BoundaryType", "BuiltInFunction", "Call", "Class", - "ClassDocumentation", + "ClassDocstring", "ConcreteImpurityIndicator", "EnumType", "Expression", @@ -65,7 +70,7 @@ "FileWrite", "FromImport", "Function", - "FunctionDocumentation", + "FunctionDocstring", "GlobalAccess", "Import", "ImpurityCertainty", @@ -76,7 +81,7 @@ "Parameter", "ParameterAccess", "ParameterAssignment", - "ParameterDocumentation", + "ParameterDocstring", "Reference", "Result", "ResultDocstring", diff --git a/src/library_analyzer/processing/api/model/_api.py b/src/library_analyzer/processing/api/model/_api.py index 763b7ce3..0a77476a 100644 --- a/src/library_analyzer/processing/api/model/_api.py +++ b/src/library_analyzer/processing/api/model/_api.py @@ -1,35 +1,46 @@ from __future__ import annotations +import json from dataclasses import dataclass, field -from typing import Any +from enum import Enum +from typing import TYPE_CHECKING, Any, TypeAlias from black import FileMode, InvalidInput, format_str from black.brackets import BracketMatchError from black.linegen import CannotSplit from black.trans import CannotTransform -from library_analyzer.utils import parent_id +from library_analyzer.utils import ensure_file_exists, parent_id -from ._documentation import ClassDocumentation, FunctionDocumentation -from ._parameters import Parameter -from ._types import AbstractType +from ._docstring import ClassDocstring, FunctionDocstring, ParameterDocstring, ResultDocstring +from ._types import AbstractType, create_type + +if TYPE_CHECKING: + from pathlib import Path API_SCHEMA_VERSION = 1 class API: @staticmethod - def from_json(json: Any) -> API: - result = API(json["distribution"], json["package"], json["version"]) + def from_json_file(path: Path) -> API: + with path.open(encoding="utf-8") as api_file: + api_json = json.load(api_file) + + return API.from_dict(api_json) + + @staticmethod + def from_dict(d: dict[str, Any]) -> API: + result = API(d["distribution"], d["package"], d["version"]) - for module_json in json.get("modules", []): - result.add_module(Module.from_json(module_json)) + for module_json in d.get("modules", []): + result.add_module(Module.from_dict(module_json)) - for class_json in json.get("classes", []): - result.add_class(Class.from_json(class_json)) + for class_json in d.get("classes", []): + result.add_class(Class.from_dict(class_json)) - for function_json in 
json.get("functions", []): - result.add_function(Function.from_json(function_json)) + for function_json in d.get("functions", []): + result.add_function(Function.from_dict(function_json)) return result @@ -126,32 +137,67 @@ def get_default_value(self, parameter_id: str) -> str | None: return None - def to_json(self) -> Any: + def get_public_api(self) -> API: + result = API(self.distribution, self.package, self.version) + + for module in self.modules.values(): + result.add_module(module) + + for class_ in self.classes.values(): + if class_.is_public: + copy = Class( + id=class_.id, + qname=class_.qname, + decorators=class_.decorators, + superclasses=class_.superclasses, + is_public=class_.is_public, + reexported_by=class_.reexported_by, + docstring=class_.docstring, + code=class_.code, + instance_attributes=class_.instance_attributes, + ) + for method in class_.methods: + if self.is_public_function(method): + copy.add_method(method) + result.add_class(copy) + + for function in self.functions.values(): + if function.is_public: + result.add_function(function) + + return result + + def to_json_file(self, path: Path) -> None: + ensure_file_exists(path) + with path.open("w", encoding="utf-8") as f: + json.dump(self.to_dict(), f, indent=2) + + def to_dict(self) -> dict[str, Any]: return { "schemaVersion": API_SCHEMA_VERSION, "distribution": self.distribution, "package": self.package, "version": self.version, - "modules": [module.to_json() for module in sorted(self.modules.values(), key=lambda it: it.id)], - "classes": [class_.to_json() for class_ in sorted(self.classes.values(), key=lambda it: it.id)], - "functions": [function.to_json() for function in sorted(self.functions.values(), key=lambda it: it.id)], + "modules": [module.to_dict() for module in sorted(self.modules.values(), key=lambda it: it.id)], + "classes": [class_.to_dict() for class_ in sorted(self.classes.values(), key=lambda it: it.id)], + "functions": [function.to_dict() for function in sorted(self.functions.values(), key=lambda it: it.id)], } class Module: @staticmethod - def from_json(json: Any) -> Module: + def from_dict(d: dict[str, Any]) -> Module: result = Module( - json["id"], - json["name"], - [Import.from_json(import_json) for import_json in json.get("imports", [])], - [FromImport.from_json(from_import_json) for from_import_json in json.get("from_imports", [])], + d["id"], + d["name"], + [Import.from_dict(import_json) for import_json in d.get("imports", [])], + [FromImport.from_dict(from_import_json) for from_import_json in d.get("from_imports", [])], ) - for class_id in json.get("classes", []): + for class_id in d.get("classes", []): result.add_class(class_id) - for function_id in json.get("functions", []): + for function_id in d.get("functions", []): result.add_function(function_id) return result @@ -170,12 +216,12 @@ def add_class(self, class_id: str) -> None: def add_function(self, function_id: str) -> None: self.functions.append(function_id) - def to_json(self) -> Any: + def to_dict(self) -> dict[str, Any]: return { "id": self.id, "name": self.name, - "imports": [import_.to_json() for import_ in self.imports], - "from_imports": [from_import.to_json() for from_import in self.from_imports], + "imports": [import_.to_dict() for import_ in self.imports], + "from_imports": [from_import.to_dict() for from_import in self.from_imports], "classes": self.classes, "functions": self.functions, } @@ -187,10 +233,10 @@ class Import: alias: str | None @staticmethod - def from_json(json: Any) -> Import: - return Import(json["module"], 
json["alias"]) + def from_dict(d: dict[str, Any]) -> Import: + return Import(d["module"], d["alias"]) - def to_json(self) -> Any: + def to_dict(self) -> dict[str, Any]: return {"module": self.module_name, "alias": self.alias} @@ -201,10 +247,10 @@ class FromImport: alias: str | None @staticmethod - def from_json(json: Any) -> FromImport: - return FromImport(json["module"], json["declaration"], json["alias"]) + def from_dict(d: dict[str, Any]) -> FromImport: + return FromImport(d["module"], d["declaration"], d["alias"]) - def to_json(self) -> Any: + def to_dict(self) -> dict[str, Any]: return { "module": self.module_name, "declaration": self.declaration_name, @@ -221,28 +267,28 @@ class Class: methods: list[str] = field(init=False) is_public: bool reexported_by: list[str] - documentation: ClassDocumentation + docstring: ClassDocstring code: str instance_attributes: list[Attribute] @staticmethod - def from_json(json: Any) -> Class: + def from_dict(d: dict[str, Any]) -> Class: result = Class( - json["id"], - json["qname"], - json.get("decorators", []), - json.get("superclasses", []), - json.get("is_public", True), - json.get("reexported_by", []), - ClassDocumentation(description=json.get("description", "")), - json.get("code", ""), + d["id"], + d["qname"], + d.get("decorators", []), + d.get("superclasses", []), + d.get("is_public", True), + d.get("reexported_by", []), + ClassDocstring(description=d.get("description", "")), + d.get("code", ""), [ - Attribute.from_json(instance_attribute, json["id"]) - for instance_attribute in json.get("instance_attributes", []) + Attribute.from_dict(instance_attribute, d["id"]) + for instance_attribute in d.get("instance_attributes", []) ], ) - for method_id in json["methods"]: + for method_id in d["methods"]: result.add_method(method_id) return result @@ -257,7 +303,7 @@ def name(self) -> str: def add_method(self, method_id: str) -> None: self.methods.append(method_id) - def to_json(self) -> Any: + def to_dict(self) -> dict[str, Any]: return { "id": self.id, "name": self.name, @@ -267,9 +313,9 @@ def to_json(self) -> Any: "methods": self.methods, "is_public": self.is_public, "reexported_by": self.reexported_by, - "description": self.documentation.description, + "description": self.docstring.description, "code": self.code, - "instance_attributes": [attribute.to_json() for attribute in self.instance_attributes], + "instance_attributes": [attribute.to_dict() for attribute in self.instance_attributes], } def get_formatted_code(self, *, cut_documentation: bool = False) -> str: @@ -321,17 +367,18 @@ def _cut_documentation_from_code(code: str, api_element: Class | Function) -> st @dataclass(frozen=True) class Attribute: + id: str name: str types: AbstractType | None class_id: str | None = None - def to_json(self) -> dict[str, Any]: - types_json = self.types.to_json() if self.types is not None else None - return {"name": self.name, "types": types_json} - @staticmethod - def from_json(json: Any, class_id: str | None = None) -> Attribute: - return Attribute(json["name"], AbstractType.from_json(json.get("types", {})), class_id) + def from_dict(d: dict[str, Any], class_id: str | None = None) -> Attribute: + return Attribute(d["id"], d["name"], AbstractType.from_dict(d.get("types", {})), class_id) + + def to_dict(self) -> dict[str, Any]: + types_json = self.types.to_dict() if self.types is not None else None + return {"id": self.id, "name": self.name, "types": types_json} @dataclass(frozen=True) @@ -343,38 +390,38 @@ class Function: results: list[Result] is_public: bool 
reexported_by: list[str] - documentation: FunctionDocumentation + docstring: FunctionDocstring code: str @staticmethod - def from_json(json: Any) -> Function: + def from_dict(d: dict[str, Any]) -> Function: return Function( - json["id"], - json["qname"], - json.get("decorators", []), - [Parameter.from_json(parameter_json) for parameter_json in json.get("parameters", [])], - [Result.from_json(result_json) for result_json in json.get("results", [])], - json.get("is_public", True), - json.get("reexported_by", []), - FunctionDocumentation(description=json.get("description", "")), - json.get("code", ""), + d["id"], + d["qname"], + d.get("decorators", []), + [Parameter.from_dict(parameter_json) for parameter_json in d.get("parameters", [])], + [Result.from_dict(result_json) for result_json in d.get("results", [])], + d.get("is_public", True), + d.get("reexported_by", []), + FunctionDocstring(description=d.get("description", "")), + d.get("code", ""), ) @property def name(self) -> str: return self.qname.rsplit(".", maxsplit=1)[-1] - def to_json(self) -> Any: + def to_dict(self) -> dict[str, Any]: return { "id": self.id, "name": self.name, "qname": self.qname, "decorators": self.decorators, - "parameters": [parameter.to_json() for parameter in self.parameters], - "results": [result.to_json() for result in self.results], + "parameters": [parameter.to_dict() for parameter in self.parameters], + "results": [result.to_dict() for result in self.results], "is_public": self.is_public, "reexported_by": self.reexported_by, - "description": self.documentation.description, + "description": self.docstring.description, "code": self.code, } @@ -385,35 +432,137 @@ def get_formatted_code(self, *, cut_documentation: bool = False) -> str: return formatted_code +class Parameter: + @staticmethod + def from_dict(d: dict[str, Any]) -> Parameter: + return Parameter( + d["id"], + d["name"], + d["qname"], + d.get("default_value", None), + ParameterAssignment[d.get("assigned_by", "POSITION_OR_NAME")], + d.get("is_public", True), + ParameterDocstring.from_dict(d.get("docstring", {})), + ) + + def __hash__(self) -> int: + return hash( + ( + self.id, + self.name, + self.qname, + self.default_value, + self.assigned_by, + self.is_public, + self.docstring, + ), + ) + + def __eq__(self, other: object) -> bool: + return ( + isinstance(other, Parameter) + and self.id == other.id + and self.name == other.name + and self.qname == other.qname + and self.default_value == other.default_value + and self.assigned_by == other.assigned_by + and self.is_public == other.is_public + and self.docstring == other.docstring + and self.type == other.type + ) + + def __init__( + self, + id_: str, + name: str, + qname: str, + default_value: str | None, + assigned_by: ParameterAssignment, + is_public: bool, + docstring: ParameterDocstring, + ) -> None: + self.id: str = id_ + self.name: str = name + self.qname: str = qname + self.default_value: str | None = default_value + self.assigned_by: ParameterAssignment = assigned_by + self.is_public: bool = is_public + self.docstring = docstring + self.type: AbstractType | None = create_type(docstring) + + def is_optional(self) -> bool: + return self.default_value is not None + + def is_required(self) -> bool: + return self.default_value is None + + def to_dict(self) -> dict[str, Any]: + return { + "id": self.id, + "name": self.name, + "qname": self.qname, + "default_value": self.default_value, + "assigned_by": self.assigned_by.name, + "is_public": self.is_public, + "docstring": self.docstring.to_dict(), + 
"type": self.type.to_dict() if self.type is not None else {}, + } + + +class AttributeAssignment(Enum): + """ + How arguments are assigned to attributes. The attributes must appear exactly in this order in an attribute list. + + IMPLICIT attributes appear on instance methods (usually called "self") and on class methods (usually called "cls"). + POSITION_ONLY attributes precede the "/" in an attribute list. NAME_ONLY attributes follow the "*" or the + POSITIONAL_VARARGS attribute ("*args"). Between the "/" and the "*" the POSITION_OR_NAME attributes reside. Finally, + the attribute list might optionally include a NAMED_VARARG attribute ("**kwargs"). + """ + + IMPLICIT = "IMPLICIT" + POSITION_ONLY = "POSITION_ONLY" + POSITION_OR_NAME = "POSITION_OR_NAME" + POSITIONAL_VARARG = "POSITIONAL_VARARG" + NAME_ONLY = "NAME_ONLY" + NAMED_VARARG = "NAMED_VARARG" + + +class ParameterAssignment(Enum): + """ + How arguments are assigned to parameters. The parameters must appear exactly in this order in a parameter list. + + IMPLICIT parameters appear on instance methods (usually called "self") and on class methods (usually called "cls"). + POSITION_ONLY parameters precede the "/" in a parameter list. NAME_ONLY parameters follow the "*" or the + POSITIONAL_VARARGS parameter ("*args"). Between the "/" and the "*" the POSITION_OR_NAME parameters reside. Finally, + the parameter list might optionally include a NAMED_VARARG parameter ("**kwargs"). + """ + + IMPLICIT = "IMPLICIT" + POSITION_ONLY = "POSITION_ONLY" + POSITION_OR_NAME = "POSITION_OR_NAME" + POSITIONAL_VARARG = "POSITIONAL_VARARG" + NAME_ONLY = "NAME_ONLY" + NAMED_VARARG = "NAMED_VARARG" + + @dataclass(frozen=True) class Result: + id: str name: str docstring: ResultDocstring function_id: str | None = None @staticmethod - def from_json(json: Any, function_id: str | None = None) -> Result: + def from_dict(d: dict[str, Any], function_id: str | None = None) -> Result: return Result( - json["name"], - ResultDocstring.from_json(json.get("docstring", {})), + d["id"], + d["name"], + ResultDocstring.from_dict(d.get("docstring", {})), function_id, ) - def to_json(self) -> Any: - return {"name": self.name, "docstring": self.docstring.to_json()} - - -@dataclass(frozen=True) -class ResultDocstring: - type: str - description: str + def to_dict(self) -> dict[str, Any]: + return {"id": self.id, "name": self.name, "docstring": self.docstring.to_dict()} - @staticmethod - def from_json(json: Any) -> ResultDocstring: - return ResultDocstring( - json.get("type", ""), - json.get("description", ""), - ) - def to_json(self) -> Any: - return {"type": self.type, "description": self.description} +ApiElement: TypeAlias = Module | Class | Attribute | Function | Parameter | Result diff --git a/src/library_analyzer/processing/api/model/_docstring.py b/src/library_analyzer/processing/api/model/_docstring.py new file mode 100644 index 00000000..108de8ef --- /dev/null +++ b/src/library_analyzer/processing/api/model/_docstring.py @@ -0,0 +1,75 @@ +from __future__ import annotations + +import dataclasses +from dataclasses import dataclass +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from typing import Any + + +@dataclass(frozen=True) +class ClassDocstring: + description: str = "" + full_docstring: str = "" + + @staticmethod + def from_dict(d: dict[str, Any]) -> ClassDocstring: + return ClassDocstring(**d) + + def to_dict(self) -> dict[str, Any]: + return dataclasses.asdict(self) + + +@dataclass(frozen=True) +class FunctionDocstring: + description: str = "" + 
full_docstring: str = "" + + @staticmethod + def from_dict(d: dict[str, Any]) -> FunctionDocstring: + return FunctionDocstring(**d) + + def to_dict(self) -> dict[str, Any]: + return dataclasses.asdict(self) + + +@dataclass(frozen=True) +class ParameterDocstring: + type: str = "" + default_value: str = "" + description: str = "" + + @staticmethod + def from_dict(d: dict[str, Any]) -> ParameterDocstring: + return ParameterDocstring(**d) + + def to_dict(self) -> dict[str, Any]: + return dataclasses.asdict(self) + + +@dataclass(frozen=True) +class AttributeDocstring: + type: str = "" + default_value: str = "" + description: str = "" + + @staticmethod + def from_dict(d: dict[str, Any]) -> AttributeDocstring: + return AttributeDocstring(**d) + + def to_dict(self) -> dict[str, Any]: + return dataclasses.asdict(self) + + +@dataclass(frozen=True) +class ResultDocstring: + type: str = "" + description: str = "" + + @staticmethod + def from_dict(d: dict[str, Any]) -> ResultDocstring: + return ResultDocstring(**d) + + def to_dict(self) -> dict[str, Any]: + return dataclasses.asdict(self) diff --git a/src/library_analyzer/processing/api/model/_documentation.py b/src/library_analyzer/processing/api/model/_documentation.py deleted file mode 100644 index ccbae18d..00000000 --- a/src/library_analyzer/processing/api/model/_documentation.py +++ /dev/null @@ -1,44 +0,0 @@ -from __future__ import annotations - -import dataclasses -from dataclasses import dataclass - - -@dataclass(frozen=True) -class ClassDocumentation: - description: str = "" - full_docstring: str = "" - - @staticmethod - def from_dict(d: dict) -> ClassDocumentation: - return ClassDocumentation(**d) - - def to_dict(self) -> dict: - return dataclasses.asdict(self) - - -@dataclass(frozen=True) -class FunctionDocumentation: - description: str = "" - full_docstring: str = "" - - @staticmethod - def from_dict(d: dict) -> FunctionDocumentation: - return FunctionDocumentation(**d) - - def to_dict(self) -> dict: - return dataclasses.asdict(self) - - -@dataclass(frozen=True) -class ParameterDocumentation: - type: str = "" - default_value: str = "" - description: str = "" - - @staticmethod - def from_dict(d: dict) -> ParameterDocumentation: - return ParameterDocumentation(**d) - - def to_dict(self) -> dict: - return dataclasses.asdict(self) diff --git a/src/library_analyzer/processing/api/model/_parameters.py b/src/library_analyzer/processing/api/model/_parameters.py deleted file mode 100644 index b25091a2..00000000 --- a/src/library_analyzer/processing/api/model/_parameters.py +++ /dev/null @@ -1,102 +0,0 @@ -from __future__ import annotations - -from enum import Enum -from typing import Any - -from ._documentation import ParameterDocumentation -from ._types import AbstractType, create_type - - -class Parameter: - @staticmethod - def from_json(json: Any) -> Parameter: - return Parameter( - json["id"], - json["name"], - json["qname"], - json.get("default_value", None), - ParameterAssignment[json.get("assigned_by", "POSITION_OR_NAME")], - json.get("is_public", True), - ParameterDocumentation.from_dict(json.get("docstring", {})), - ) - - def __hash__(self) -> int: - return hash( - ( - self.id, - self.name, - self.qname, - self.default_value, - self.assigned_by, - self.is_public, - self.documentation, - ), - ) - - def __eq__(self, other: object) -> bool: - return ( - isinstance(other, Parameter) - and self.id == other.id - and self.name == other.name - and self.qname == other.qname - and self.default_value == other.default_value - and self.assigned_by == 
other.assigned_by - and self.is_public == other.is_public - and self.documentation == other.documentation - and self.type == other.type - ) - - def __init__( - self, - id_: str, - name: str, - qname: str, - default_value: str | None, - assigned_by: ParameterAssignment, - is_public: bool, - documentation: ParameterDocumentation, - ) -> None: - self.id: str = id_ - self.name: str = name - self.qname: str = qname - self.default_value: str | None = default_value - self.assigned_by: ParameterAssignment = assigned_by - self.is_public: bool = is_public - self.documentation = documentation - self.type: AbstractType | None = create_type(documentation) - - def is_optional(self) -> bool: - return self.default_value is not None - - def is_required(self) -> bool: - return self.default_value is None - - def to_json(self) -> Any: - return { - "id": self.id, - "name": self.name, - "qname": self.qname, - "default_value": self.default_value, - "assigned_by": self.assigned_by.name, - "is_public": self.is_public, - "docstring": self.documentation.to_dict(), - "type": self.type.to_json() if self.type is not None else {}, - } - - -class ParameterAssignment(Enum): - """ - How arguments are assigned to parameters. The parameters must appear exactly in this order in a parameter list. - - IMPLICIT parameters appear on instance methods (usually called "self") and on class methods (usually called "cls"). - POSITION_ONLY parameters precede the "/" in a parameter list. NAME_ONLY parameters follow the "*" or the - POSITIONAL_VARARGS parameter ("*args"). Between the "/" and the "*" the POSITION_OR_NAME parameters reside. Finally, - the parameter list might optionally include a NAMED_VARARG parameter ("**kwargs"). - """ - - IMPLICIT = "IMPLICIT" - POSITION_ONLY = "POSITION_ONLY" - POSITION_OR_NAME = "POSITION_OR_NAME" - POSITIONAL_VARARG = ("POSITIONAL_VARARG",) - NAME_ONLY = "NAME_ONLY" - NAMED_VARARG = "NAMED_VARARG" diff --git a/src/library_analyzer/processing/api/model/_types.py b/src/library_analyzer/processing/api/model/_types.py index a6c35a7b..f9b7602d 100644 --- a/src/library_analyzer/processing/api/model/_types.py +++ b/src/library_analyzer/processing/api/model/_types.py @@ -6,28 +6,28 @@ from typing import TYPE_CHECKING, Any, ClassVar if TYPE_CHECKING: - from ._documentation import ParameterDocumentation + from ._docstring import ParameterDocstring class AbstractType(metaclass=ABCMeta): - @abstractmethod - def to_json(self) -> dict[str, Any]: - pass - @classmethod - def from_json(cls, json: Any) -> AbstractType | None: - if json is None: + def from_dict(cls, d: dict[str, Any]) -> AbstractType | None: + if d is None: return None - value: AbstractType | None = NamedType.from_json(json) + value: AbstractType | None = NamedType.from_dict(d) if value is not None: return value - value = EnumType.from_json(json) + value = EnumType.from_dict(d) if value is not None: return value - value = BoundaryType.from_json(json) + value = BoundaryType.from_dict(d) if value is not None: return value - return UnionType.from_json(json) + return UnionType.from_dict(d) + + @abstractmethod + def to_dict(self) -> dict[str, Any]: + pass @dataclass(frozen=True) @@ -35,16 +35,16 @@ class NamedType(AbstractType): name: str @classmethod - def from_json(cls, json: Any) -> NamedType | None: - if json.get("kind", "") == cls.__name__: - return NamedType(json["name"]) + def from_dict(cls, d: Any) -> NamedType | None: + if d.get("kind", "") == cls.__name__: + return NamedType(d["name"]) return None @classmethod def from_string(cls, string: str) -> 
NamedType: return NamedType(string) - def to_json(self) -> dict[str, str]: + def to_dict(self) -> dict[str, str]: return {"kind": self.__class__.__name__, "name": self.name} @@ -54,9 +54,9 @@ class EnumType(AbstractType): full_match: str = field(default="", compare=False) @classmethod - def from_json(cls, json: Any) -> EnumType | None: - if json["kind"] == cls.__name__: - return EnumType(json["values"]) + def from_dict(cls, d: Any) -> EnumType | None: + if d["kind"] == cls.__name__: + return EnumType(d["values"]) return None @classmethod @@ -99,7 +99,7 @@ def update(self, enum: EnumType) -> EnumType: values.update(enum.values) return EnumType(frozenset(values)) - def to_json(self) -> dict[str, Any]: + def to_dict(self) -> dict[str, Any]: return {"kind": self.__class__.__name__, "values": set(self.values)} @@ -125,14 +125,14 @@ def _is_inclusive(cls, bracket: str) -> bool: raise ValueError(f"{bracket} is not one of []()") @classmethod - def from_json(cls, json: Any) -> BoundaryType | None: - if json["kind"] == cls.__name__: + def from_dict(cls, d: Any) -> BoundaryType | None: + if d["kind"] == cls.__name__: return BoundaryType( - json["base_type"], - json["min"], - json["max"], - json["min_inclusive"], - json["max_inclusive"], + d["base_type"], + d["min"], + d["max"], + d["min_inclusive"], + d["max_inclusive"], ) return None @@ -197,7 +197,7 @@ def __eq__(self, __o: object) -> bool: return self.max_inclusive == __o.max_inclusive return False - def to_json(self) -> dict[str, Any]: + def to_dict(self) -> dict[str, Any]: return { "kind": self.__class__.__name__, "base_type": self.base_type, @@ -213,20 +213,20 @@ class UnionType(AbstractType): types: list[AbstractType] @classmethod - def from_json(cls, json: Any) -> UnionType | None: - if json["kind"] == cls.__name__: + def from_dict(cls, d: Any) -> UnionType | None: + if d["kind"] == cls.__name__: types = [] - for element in json["types"]: - type_ = AbstractType.from_json(element) + for element in d["types"]: + type_ = AbstractType.from_dict(element) if type_ is not None: types.append(type_) return UnionType(types) return None - def to_json(self) -> dict[str, Any]: + def to_dict(self) -> dict[str, Any]: type_list = [] for t in self.types: - type_list.append(t.to_json()) + type_list.append(t.to_dict()) return {"kind": self.__class__.__name__, "types": type_list} @@ -235,7 +235,7 @@ def __hash__(self) -> int: def create_type( - parameter_documentation: ParameterDocumentation, + parameter_documentation: ParameterDocstring, ) -> AbstractType | None: type_string = parameter_documentation.type types: list[AbstractType] = [] diff --git a/src/library_analyzer/processing/dependencies/_get_dependency.py b/src/library_analyzer/processing/dependencies/_get_dependency.py index 8fb5ba13..17046f40 100644 --- a/src/library_analyzer/processing/dependencies/_get_dependency.py +++ b/src/library_analyzer/processing/dependencies/_get_dependency.py @@ -1,10 +1,10 @@ -import spacy from spacy.matcher import DependencyMatcher from spacy.tokens import Token from spacy.tokens.doc import Doc from spacy.tokens.span import Span from library_analyzer.processing.api.model import API, Parameter +from library_analyzer.utils import load_language from ._dependency_patterns import dependency_matcher_patterns from ._parameter_dependencies import ( @@ -19,8 +19,6 @@ ) from ._preprocess_docstring import preprocess_docstring -PIPELINE = "en_core_web_sm" - def extract_lefts_and_rights(curr_token: Token, extracted: list | None = None) -> list: """Given a spaCy token, extract 
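The renamed `from_dict` methods above all dispatch on a `"kind"` discriminator equal to the subclass name, which is what lets `AbstractType.from_dict` try each subclass in turn. A small round-trip sketch under that scheme:

```python
from library_analyzer.processing.api.model import AbstractType, NamedType, UnionType

union = UnionType([NamedType("int"), NamedType("str")])
data = union.to_dict()
# data == {"kind": "UnionType", "types": [{"kind": "NamedType", "name": "int"},
#                                         {"kind": "NamedType", "name": "str"}]}
assert AbstractType.from_dict(data) == union
```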
recursively all tokens in its dependency subtree in inorder traversal.""" @@ -172,7 +170,7 @@ def get_dependencies(api: API) -> APIDependencies: Parse and preprocess each doc string from every function. Extract and return all dependencies as a dict with function and parameter names as keys. """ - nlp = spacy.load(PIPELINE) + nlp = load_language("en_core_web_sm") matcher = DependencyMatcher(nlp.vocab) spacy_id_to_pattern_id_mapping: dict = {} @@ -186,7 +184,7 @@ def get_dependencies(api: API) -> APIDependencies: parameters = function.parameters all_dependencies[function_name] = {} for parameter in parameters: - docstring = parameter.documentation.description + docstring = parameter.docstring.description docstring_preprocessed = preprocess_docstring(docstring) doc = nlp(docstring_preprocessed) param_dependencies = [] diff --git a/src/library_analyzer/processing/dependencies/_parameter_dependencies.py b/src/library_analyzer/processing/dependencies/_parameter_dependencies.py index 7c631b45..b3286681 100644 --- a/src/library_analyzer/processing/dependencies/_parameter_dependencies.py +++ b/src/library_analyzer/processing/dependencies/_parameter_dependencies.py @@ -1,41 +1,58 @@ from __future__ import annotations +import json from dataclasses import dataclass -from typing import Any +from typing import TYPE_CHECKING, Any from library_analyzer.processing.api.model import Parameter +from library_analyzer.utils import ensure_file_exists +if TYPE_CHECKING: + from pathlib import Path -@dataclass -class Action: - action: str - - @classmethod - def from_json(cls, json: Any) -> Action: - return cls(json["action"]) - def to_json(self) -> dict: - return {"action": self.action} - - -class RuntimeAction(Action): - def __init__(self, action: str) -> None: - super().__init__(action) +@dataclass +class APIDependencies: + dependencies: dict + def to_json_file(self, path: Path) -> None: + ensure_file_exists(path) + with path.open("w") as f: + json.dump(self.to_dict(), f, indent=2) -class StaticAction(Action): - def __init__(self, action: str) -> None: - super().__init__(action) + def to_dict(self) -> dict[str, Any]: + return { + function_name: { + parameter_name: [dependency.to_dict() for dependency in dependencies] + for parameter_name, dependencies in parameter_name.items() + } + for function_name, parameter_name in self.dependencies.items() + } -class ParameterIsIgnored(StaticAction): - def __init__(self, action: str) -> None: - super().__init__(action) +@dataclass +class Dependency: + hasDependentParameter: Parameter # noqa: N815 + isDependingOn: Parameter # noqa: N815 + hasCondition: Condition # noqa: N815 + hasAction: Action # noqa: N815 + @classmethod + def from_dict(cls, d: dict[str, Any]) -> Dependency: + return cls( + Parameter.from_dict(d["hasDependentParameter"]), + Parameter.from_dict(d["isDependingOn"]), + Condition.from_dict(d["hasCondition"]), + Action.from_dict(d["hasAction"]), + ) -class ParameterIsIllegal(StaticAction): - def __init__(self, action: str) -> None: - super().__init__(action) + def to_dict(self) -> dict[str, Any]: + return { + "hasDependentParameter": self.hasDependentParameter.to_dict(), + "isDependingOn": self.isDependingOn.to_dict(), + "hasCondition": self.hasCondition.to_dict(), + "hasAction": self.hasAction.to_dict(), + } @dataclass @@ -43,10 +60,10 @@ class Condition: condition: str @classmethod - def from_json(cls, json: Any) -> Condition: - return cls(json["condition"]) + def from_dict(cls, d: dict[str, Any]) -> Condition: + return cls(d["condition"]) - def to_json(self) 
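With the reordering above, `APIDependencies` now owns its own persistence via `to_json_file`. A minimal usage sketch (the output path is hypothetical, and the assumption that `ensure_file_exists` creates missing parent directories rests only on its name):

```python
from pathlib import Path

from library_analyzer.processing.dependencies._parameter_dependencies import APIDependencies

deps = APIDependencies(dependencies={})  # empty result, just to demonstrate the call
deps.to_json_file(Path("out/dependencies.json"))  # serializes to_dict() with indent=2
```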
-> dict: + def to_dict(self) -> dict[str, Any]: return {"condition": self.condition} @@ -71,39 +88,32 @@ def __init__(self, condition: str) -> None: @dataclass -class Dependency: - hasDependentParameter: Parameter # noqa: N815 - isDependingOn: Parameter # noqa: N815 - hasCondition: Condition # noqa: N815 - hasAction: Action # noqa: N815 +class Action: + action: str @classmethod - def from_json(cls, json: Any) -> Dependency: - return cls( - Parameter.from_json(json["hasDependentParameter"]), - Parameter.from_json(json["isDependingOn"]), - Condition.from_json(json["hasCondition"]), - Action.from_json(json["hasAction"]), - ) + def from_dict(cls, d: dict[str, Any]) -> Action: + return cls(d["action"]) - def to_json(self) -> dict: - return { - "hasDependentParameter": self.hasDependentParameter.to_json(), - "isDependingOn": self.isDependingOn.to_json(), - "hasCondition": self.hasCondition.to_json(), - "hasAction": self.hasAction.to_json(), - } + def to_dict(self) -> dict[str, Any]: + return {"action": self.action} -@dataclass -class APIDependencies: - dependencies: dict +class RuntimeAction(Action): + def __init__(self, action: str) -> None: + super().__init__(action) - def to_json(self) -> dict: - return { - function_name: { - parameter_name: [dependency.to_json() for dependency in dependencies] - for parameter_name, dependencies in parameter_name.items() - } - for function_name, parameter_name in self.dependencies.items() - } + +class StaticAction(Action): + def __init__(self, action: str) -> None: + super().__init__(action) + + +class ParameterIsIgnored(StaticAction): + def __init__(self, action: str) -> None: + super().__init__(action) + + +class ParameterIsIllegal(StaticAction): + def __init__(self, action: str) -> None: + super().__init__(action) diff --git a/src/library_analyzer/processing/migration/_migrate.py b/src/library_analyzer/processing/migration/_migrate.py index a30bc8df..c3d8bd99 100644 --- a/src/library_analyzer/processing/migration/_migrate.py +++ b/src/library_analyzer/processing/migration/_migrate.py @@ -33,8 +33,8 @@ class Migration: annotationsv1: AnnotationStore mappings: list[Mapping] - reliable_similarity: float = 0.9 - unsure_similarity: float = 0.8 + reliable_similarity: float = 0.85 + unsure_similarity: float = 0.75 migrated_annotation_store: AnnotationStore = field(init=False) unsure_migrated_annotation_store: AnnotationStore = field(init=False) @@ -131,6 +131,11 @@ def add_annotations_based_on_similarity(self, annotation: AbstractAnnotation, ma def _get_mappings_for_table(self) -> list[str]: table_rows: list[str] = [] for mapping in self.mappings: + if len(mapping.get_apiv1_elements()) > 0 and isinstance( + mapping.get_apiv1_elements()[0], + Attribute | Result, + ): + continue def print_api_element(api_element: Attribute | Class | Function | Parameter | Result) -> str: if isinstance(api_element, Result): @@ -146,100 +151,42 @@ def print_api_element(api_element: Attribute | Class | Function | Parameter | Re table_rows.append(f"{mapping.similarity:.4}|{apiv1_elements}|{apiv2_elements}|") return table_rows - def _get_not_mapped_api_elements_for_table(self, apiv1: API, apiv2: API) -> list[str]: - not_mapped_api_elements: list[str] = [] - not_mapped_apiv1_elements = self._get_not_mapped_api_elements_as_string(apiv1) - for element_id in not_mapped_apiv1_elements: - not_mapped_api_elements.append(f"-|`{element_id}`||") - not_mapped_apiv2_elements = self._get_not_mapped_api_elements_as_string(apiv2, print_for_apiv2=True) - for element_id in not_mapped_apiv2_elements: - 
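The default thresholds above drop from 0.9/0.8 to 0.85/0.75, so more mappings qualify as reliable or at least unsure. How exactly the two fields gate a mapping is not shown in this hunk; the routing below is an assumption based on the field names and the two annotation stores declared next to them:

```python
RELIABLE_SIMILARITY = 0.85
UNSURE_SIMILARITY = 0.75

def route(similarity: float) -> str:
    """Presumed routing of a mapping into one of the two annotation stores."""
    if similarity >= RELIABLE_SIMILARITY:
        return "migrated_annotation_store"
    if similarity >= UNSURE_SIMILARITY:
        return "unsure_migrated_annotation_store"
    return "not migrated"

assert route(0.87) == "migrated_annotation_store"  # below the old 0.9 cutoff
assert route(0.78) == "unsure_migrated_annotation_store"
```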
not_mapped_api_elements.append(f"-||`{element_id}`|") - return not_mapped_api_elements + def _get_unmapped_api_elements_for_table(self, apiv1: API, apiv2: API) -> list[str]: + unmapped_api_elements: list[str] = [] + unmapped_apiv1_elements = self._get_unmapped_api_elements_as_string(apiv1) + for element_id in unmapped_apiv1_elements: + unmapped_api_elements.append(f"-|`{element_id}`||") + unmapped_apiv2_elements = self._get_unmapped_api_elements_as_string(apiv2, print_for_apiv2=True) + for element_id in unmapped_apiv2_elements: + unmapped_api_elements.append(f"-||`{element_id}`|") + return unmapped_api_elements - def _get_not_mapped_api_elements_as_string(self, api: API, print_for_apiv2: bool = False) -> list[str]: - not_mapped_api_elements: list[str] = [] + def _get_unmapped_api_elements_as_string(self, api: API, print_for_apiv2: bool = False) -> list[str]: + api_elements: list[str] = [] + for class_ in api.classes.values(): + api_elements.append(class_.id) + for function in api.functions.values(): + api_elements.append(function.id) + for parameter in api.parameters().values(): + api_elements.append(parameter.id) + # Attribute and Result could be added here - def is_included(api_element: Attribute | Class | Function | Parameter | Result) -> bool: - if not print_for_apiv2: - for mapping in self.mappings: - for element in mapping.get_apiv1_elements(): - if ( - isinstance(api_element, Attribute) - and isinstance(element, Attribute) - and element.name == api_element.name - and isinstance(element.types, type(api_element.types)) - ): - return True - if ( - isinstance(api_element, Result) - and isinstance(element, Result) - and element.name == api_element.name - and element.docstring == api_element.docstring - ): - return True - if ( - not isinstance(api_element, Attribute | Result) - and not isinstance( - element, - Attribute | Result, - ) - and element.id == api_element.id - ): - return True - return False + mapped_api_elements: set[str] = set() + if print_for_apiv2: for mapping in self.mappings: for element in mapping.get_apiv2_elements(): - if ( - isinstance(api_element, Attribute) - and isinstance(element, Attribute) - and element.name == api_element.name - and isinstance(element.types, type(api_element.types)) - ): - return True - if ( - isinstance(api_element, Result) - and isinstance(element, Result) - and element.name == api_element.name - and element.docstring == api_element.docstring - ): - return True - if ( - not isinstance(api_element, Attribute | Result) - and not isinstance( - element, - Attribute | Result, - ) - and element.id == api_element.id - ): - return True - return False + mapped_api_elements.add(element.id) + else: + for mapping in self.mappings: + for element in mapping.get_apiv1_elements(): + mapped_api_elements.add(element.id) - for class_ in api.classes.values(): - if not is_included(class_): - not_mapped_api_elements.append(class_.id) - for function in api.functions.values(): - if not is_included(function): - not_mapped_api_elements.append(function.id) - for parameter in api.parameters().values(): - if not is_included(parameter): - not_mapped_api_elements.append(parameter.id) - for attribute, class_ in [ - (attribute, class_1) for class_1 in api.classes.values() for attribute in class_1.instance_attributes - ]: - if not is_included(attribute): - not_mapped_api_elements.append(class_.id + "/" + attribute.name) - for result, function in [ - (result, function_1) for function_1 in api.functions.values() for result in function_1.results - ]: - if not is_included(result): 
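The rewritten `_get_unmapped_api_elements_as_string` above replaces the quadratic `is_included` scan with the standard two-pass set-difference pattern: collect every mapped ID into a set once, then filter the element list against it. The core idea in isolation, with made-up IDs:

```python
api_elements = ["pkg/Class", "pkg/func", "pkg/func/param"]
mapped_api_elements = {"pkg/Class"}  # IDs that occur in any mapping

unmapped = [e for e in api_elements if e not in mapped_api_elements]
assert unmapped == ["pkg/func", "pkg/func/param"]
```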
- not_mapped_api_elements.append(function.id + "/" + result.name) - return not_mapped_api_elements + return [element for element in api_elements if element not in mapped_api_elements] def print(self, apiv1: API, apiv2: API) -> None: print("**Similarity**|**APIV1**|**APIV2**|**comment**\n:-----:|:-----:|:-----:|:----:|") table_body = self._get_mappings_for_table() - table_body.extend(self._get_not_mapped_api_elements_for_table(apiv1, apiv2)) - table_body.sort(key=lambda row: max(len(cell.split("/")) for cell in row.split("|")[:-1])) + table_body.extend(self._get_unmapped_api_elements_for_table(apiv1, apiv2)) print("\n".join(table_body)) def _handle_duplicates(self) -> None: @@ -289,7 +236,7 @@ def _handle_duplicates(self) -> None: different_values = set() first_annotation_and_value: tuple[AbstractAnnotation, str] | None = None for annotation in sorted_duplicates: - annotation_dict = annotation.to_json() + annotation_dict = annotation.to_dict() for key in [ "target", "authors", @@ -307,7 +254,9 @@ def _handle_duplicates(self) -> None: first_annotation, first_value = first_annotation_and_value if len(different_values) > 1: different_values.remove(first_value) - comment = "Conflicting Attribute during migration: " + ", ".join(sorted(different_values)) + comment = "Conflicting attribute found during migration: " + ", ".join( + sorted(different_values), + ) first_annotation.comment = ( "\n".join([comment, first_annotation.comment]) if len(first_annotation.comment) > 0 diff --git a/src/library_analyzer/processing/migration/annotations/_get_migration_text.py b/src/library_analyzer/processing/migration/annotations/_get_migration_text.py index 6094e84f..6cb49f82 100644 --- a/src/library_analyzer/processing/migration/annotations/_get_migration_text.py +++ b/src/library_analyzer/processing/migration/annotations/_get_migration_text.py @@ -36,7 +36,7 @@ def _get_further_information(annotation: AbstractAnnotation) -> str: ): return "" if isinstance(annotation, BoundaryAnnotation): - return " with the interval '" + str(annotation.interval.to_json()) + "'" + return " with the interval '" + str(annotation.interval.to_dict()) + "'" if isinstance(annotation, CalledAfterAnnotation): return " with the method '" + annotation.calledAfterName + "' that should be called before" if isinstance(annotation, DescriptionAnnotation): @@ -75,7 +75,7 @@ def _get_further_information(annotation: AbstractAnnotation) -> str: ) value += "'" return value - return " with the data '" + str(annotation.to_json()) + "'" + return " with the data '" + str(annotation.to_dict()) + "'" def get_migration_text( diff --git a/src/library_analyzer/processing/migration/annotations/_migrate_boundary_annotation.py b/src/library_analyzer/processing/migration/annotations/_migrate_boundary_annotation.py index 43b02b46..67e5d7ce 100644 --- a/src/library_analyzer/processing/migration/annotations/_migrate_boundary_annotation.py +++ b/src/library_analyzer/processing/migration/annotations/_migrate_boundary_annotation.py @@ -9,18 +9,12 @@ ) from library_analyzer.processing.api.model import ( AbstractType, - Attribute, NamedType, Parameter, - Result, UnionType, ) from library_analyzer.processing.migration.model import ( - ManyToManyMapping, - ManyToOneMapping, Mapping, - OneToManyMapping, - OneToOneMapping, ) from ._constants import migration_author @@ -70,20 +64,17 @@ def _contains_number_and_is_discrete( return False, False -def migrate_boundary_annotation(boundary_annotation: BoundaryAnnotation, mapping: Mapping) -> list[AbstractAnnotation]: - 
boundary_annotation = deepcopy(boundary_annotation) - authors = boundary_annotation.authors - authors.append(migration_author) - boundary_annotation.authors = authors - - annotated_apiv1_element = get_annotated_api_element(boundary_annotation, mapping.get_apiv1_elements()) +def migrate_boundary_annotation(origin_annotation: BoundaryAnnotation, mapping: Mapping) -> list[AbstractAnnotation]: + annotated_apiv1_element = get_annotated_api_element(origin_annotation, mapping.get_apiv1_elements()) if annotated_apiv1_element is None or not isinstance(annotated_apiv1_element, Parameter): return [] - if isinstance(mapping, OneToOneMapping | ManyToOneMapping): - parameter = mapping.get_apiv2_elements()[0] - if isinstance(parameter, Attribute | Result): - return [] + migrated_annotations: list[AbstractAnnotation] = [] + for parameter in mapping.get_apiv2_elements(): + boundary_annotation = deepcopy(origin_annotation) + authors = boundary_annotation.authors + authors.append(migration_author) + boundary_annotation.authors = authors if isinstance(parameter, Parameter): ( parameter_expects_number, @@ -93,7 +84,8 @@ def migrate_boundary_annotation(boundary_annotation: BoundaryAnnotation, mapping boundary_annotation.reviewResult = EnumReviewResult.UNSURE boundary_annotation.comment = get_migration_text(boundary_annotation, mapping) boundary_annotation.target = parameter.id - return [boundary_annotation] + migrated_annotations.append(boundary_annotation) + continue if parameter_expects_number or (parameter.type is None and annotated_apiv1_element.type is None): if (parameter_type_is_discrete != boundary_annotation.interval.isDiscrete) and not ( parameter.type is None and annotated_apiv1_element.type is None @@ -106,8 +98,9 @@ def migrate_boundary_annotation(boundary_annotation: BoundaryAnnotation, mapping parameter_type_is_discrete, ) boundary_annotation.target = parameter.id - return [boundary_annotation] - return [ + migrated_annotations.append(boundary_annotation) + continue + migrated_annotations.append( TodoAnnotation( parameter.id, authors, @@ -116,60 +109,5 @@ def migrate_boundary_annotation(boundary_annotation: BoundaryAnnotation, mapping EnumReviewResult.NONE, get_migration_text(boundary_annotation, mapping, for_todo_annotation=True), ), - ] - migrated_annotations: list[AbstractAnnotation] = [] - if isinstance(mapping, OneToManyMapping | ManyToManyMapping): - for parameter in mapping.get_apiv2_elements(): - if isinstance(parameter, Parameter): - is_number, is_discrete = _contains_number_and_is_discrete(parameter.type) - if ( - parameter.type is not None and is_number and is_discrete == boundary_annotation.interval.isDiscrete - ) or (parameter.type is None and annotated_apiv1_element.type is None): - migrated_annotations.append( - BoundaryAnnotation( - parameter.id, - authors, - boundary_annotation.reviewers, - boundary_annotation.comment, - EnumReviewResult.NONE, - boundary_annotation.interval, - ), - ) - elif parameter.type is not None and is_number: - migrated_annotations.append( - BoundaryAnnotation( - parameter.id, - authors, - boundary_annotation.reviewers, - get_migration_text(boundary_annotation, mapping), - EnumReviewResult.UNSURE, - migrate_interval_to_fit_parameter_type( - boundary_annotation.interval, - is_discrete, - ), - ), - ) - elif parameter.type is None: - migrated_annotations.append( - BoundaryAnnotation( - parameter.id, - authors, - boundary_annotation.reviewers, - get_migration_text(boundary_annotation, mapping), - EnumReviewResult.UNSURE, - boundary_annotation.interval, - ), - ) 
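All of the rewritten `migrate_*` functions in this diff converge on one loop shape: deep-copy the original annotation once per apiv2 element, so that setting `target`, `authors`, or `reviewResult` on one copy cannot leak into the next iteration. A schematic of that shared pattern (the untyped signature and the author constant are stand-ins, not the real API):

```python
from copy import deepcopy

def migrate(origin_annotation, mapping, migration_author="stand-in-author"):
    migrated = []
    for element in mapping.get_apiv2_elements():
        annotation = deepcopy(origin_annotation)  # fresh copy per target element
        annotation.authors = [*annotation.authors, migration_author]
        annotation.target = element.id
        migrated.append(annotation)
    return migrated
```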
- continue - if not isinstance(parameter, Attribute | Result): - migrated_annotations.append( - TodoAnnotation( - parameter.id, - authors, - boundary_annotation.reviewers, - boundary_annotation.comment, - EnumReviewResult.NONE, - get_migration_text(boundary_annotation, mapping, for_todo_annotation=True), - ), - ) + ) return migrated_annotations diff --git a/src/library_analyzer/processing/migration/annotations/_migrate_called_after_annotation.py b/src/library_analyzer/processing/migration/annotations/_migrate_called_after_annotation.py index 1050e1c8..1ba8737e 100644 --- a/src/library_analyzer/processing/migration/annotations/_migrate_called_after_annotation.py +++ b/src/library_analyzer/processing/migration/annotations/_migrate_called_after_annotation.py @@ -14,17 +14,16 @@ def migrate_called_after_annotation( - called_after_annotation: CalledAfterAnnotation, + origin_annotation: CalledAfterAnnotation, mapping: Mapping, mappings: list[Mapping], ) -> list[AbstractAnnotation]: - called_after_annotation = deepcopy(called_after_annotation) - authors = called_after_annotation.authors - authors.append(migration_author) - called_after_annotation.authors = authors - migrated_annotations: list[AbstractAnnotation] = [] for element in mapping.get_apiv2_elements(): + called_after_annotation = deepcopy(origin_annotation) + authors = called_after_annotation.authors + authors.append(migration_author) + called_after_annotation.authors = authors if not isinstance(element, Function): if not isinstance(element, Attribute | Result): migrated_annotations.append( @@ -40,22 +39,7 @@ def migrate_called_after_annotation( continue called_before_functions = _get_function_called_before_replacements(called_after_annotation, mappings, element) - if len(called_before_functions) == 0: - migrated_annotations.append( - CalledAfterAnnotation( - element.id, - authors, - called_after_annotation.reviewers, - get_migration_text( - called_after_annotation, - mapping, - additional_information=called_before_functions, - ), - EnumReviewResult.UNSURE, - called_after_annotation.calledAfterName, - ), - ) - elif len(called_before_functions) == 1 and called_before_functions[0] != element: + if len(called_before_functions) == 1 and called_before_functions[0] != element: migrated_annotations.append( CalledAfterAnnotation( element.id, diff --git a/src/library_analyzer/processing/migration/annotations/_migrate_description_annotation.py b/src/library_analyzer/processing/migration/annotations/_migrate_description_annotation.py index 3fb422bf..2fd6e4d8 100644 --- a/src/library_analyzer/processing/migration/annotations/_migrate_description_annotation.py +++ b/src/library_analyzer/processing/migration/annotations/_migrate_description_annotation.py @@ -6,11 +6,9 @@ EnumReviewResult, TodoAnnotation, ) -from library_analyzer.processing.api.model import Attribute, Result +from library_analyzer.processing.api.model import Attribute, Parameter, Result from library_analyzer.processing.migration.model import ( - ManyToOneMapping, Mapping, - OneToOneMapping, ) from ._constants import migration_author @@ -19,21 +17,14 @@ def migrate_description_annotation( - description_annotation: DescriptionAnnotation, + origin_annotation: DescriptionAnnotation, mapping: Mapping, ) -> list[AbstractAnnotation]: - description_annotation = deepcopy(description_annotation) + description_annotation = deepcopy(origin_annotation) authors = description_annotation.authors authors.append(migration_author) description_annotation.authors = authors - if isinstance(mapping, 
ManyToOneMapping | OneToOneMapping): - element = mapping.get_apiv2_elements()[0] - if isinstance(element, Attribute | Result): - return [] - description_annotation.target = element.id - return [description_annotation] - annotated_apiv1_element = get_annotated_api_element(description_annotation, mapping.get_apiv1_elements()) if annotated_apiv1_element is None: return [] @@ -41,6 +32,26 @@ def migrate_description_annotation( description_annotations: list[AbstractAnnotation] = [] for element in mapping.get_apiv2_elements(): if isinstance(element, type(annotated_apiv1_element)) and not isinstance(element, Attribute | Result): + documentationv1 = ( + annotated_apiv1_element.docstring.description + if isinstance(element, Parameter) + else annotated_apiv1_element.docstring.full_docstring + ) + documentationv2 = ( + element.docstring.description if isinstance(element, Parameter) else element.docstring.full_docstring + ) + if documentationv1 != documentationv2 and documentationv2 != description_annotation.newDescription: + description_annotations.append( + DescriptionAnnotation( + element.id, + authors, + description_annotation.reviewers, + description_annotation.comment, + EnumReviewResult.UNSURE, + newDescription=description_annotation.newDescription, + ), + ) + continue description_annotations.append( DescriptionAnnotation( element.id, diff --git a/src/library_analyzer/processing/migration/annotations/_migrate_enum_annotation.py b/src/library_analyzer/processing/migration/annotations/_migrate_enum_annotation.py index 276cffa8..f38aebfa 100644 --- a/src/library_analyzer/processing/migration/annotations/_migrate_enum_annotation.py +++ b/src/library_analyzer/processing/migration/annotations/_migrate_enum_annotation.py @@ -15,13 +15,7 @@ Result, UnionType, ) -from library_analyzer.processing.migration.model import ( - ManyToManyMapping, - ManyToOneMapping, - Mapping, - OneToManyMapping, - OneToOneMapping, -) +from library_analyzer.processing.migration.model import Mapping from ._constants import migration_author from ._get_annotated_api_element import get_annotated_api_element @@ -42,18 +36,17 @@ def _default_value_is_in_instance_values_or_is_empty(default_value: str | None, return default_value is None or default_value in (pair.stringValue for pair in pairs) or len(default_value) == 0 -def migrate_enum_annotation(enum_annotation: EnumAnnotation, mapping: Mapping) -> list[AbstractAnnotation]: - enum_annotation = deepcopy(enum_annotation) - authors = enum_annotation.authors - authors.append(migration_author) - enum_annotation.authors = authors - - annotated_apiv1_element = get_annotated_api_element(enum_annotation, mapping.get_apiv1_elements()) +def migrate_enum_annotation(origin_annotation: EnumAnnotation, mapping: Mapping) -> list[AbstractAnnotation]: + annotated_apiv1_element = get_annotated_api_element(origin_annotation, mapping.get_apiv1_elements()) if annotated_apiv1_element is None or not isinstance(annotated_apiv1_element, Parameter): return [] - if isinstance(mapping, OneToOneMapping | ManyToOneMapping): - parameter = mapping.get_apiv2_elements()[0] + migrated_annotations: list[AbstractAnnotation] = [] + for parameter in mapping.get_apiv2_elements(): + enum_annotation = deepcopy(origin_annotation) + authors = enum_annotation.authors + authors.append(migration_author) + enum_annotation.authors = authors if isinstance(parameter, Attribute | Result): return [] if isinstance(parameter, Parameter): @@ -63,16 +56,25 @@ def migrate_enum_annotation(enum_annotation: EnumAnnotation, mapping: Mapping) - and 
_default_value_is_in_instance_values_or_is_empty(parameter.default_value, enum_annotation.pairs) ) or (parameter.type is None and annotated_apiv1_element.type is None): enum_annotation.target = parameter.id - return [enum_annotation] - if isinstance(parameter.type, NamedType): + migrated_annotations.append(enum_annotation) + continue + if ( + isinstance(parameter.type, NamedType) + and not _contains_string(parameter.type) + and not ( + isinstance(annotated_apiv1_element.type, NamedType) + and parameter.type.name == annotated_apiv1_element.type.name + ) + ): # assuming api has been changed to an enum type: # do not migrate annotation - return [] + continue enum_annotation.reviewResult = EnumReviewResult.UNSURE enum_annotation.comment = get_migration_text(enum_annotation, mapping) enum_annotation.target = parameter.id - return [enum_annotation] - return [ + migrated_annotations.append(enum_annotation) + continue + migrated_annotations.append( TodoAnnotation( parameter.id, authors, @@ -81,51 +83,5 @@ def migrate_enum_annotation(enum_annotation: EnumAnnotation, mapping: Mapping) - EnumReviewResult.NONE, get_migration_text(enum_annotation, mapping, for_todo_annotation=True), ), - ] - - migrated_annotations: list[AbstractAnnotation] = [] - if isinstance(mapping, OneToManyMapping | ManyToManyMapping): - for parameter in mapping.get_apiv2_elements(): - if isinstance(parameter, Parameter): - if ( - parameter.type is not None - and _contains_string(parameter.type) - and _default_value_is_in_instance_values_or_is_empty(parameter.default_value, enum_annotation.pairs) - ) or (parameter.type is None and annotated_apiv1_element.type is None): - migrated_annotations.append( - EnumAnnotation( - parameter.id, - authors, - enum_annotation.reviewers, - enum_annotation.comment, - EnumReviewResult.NONE, - enum_annotation.enumName, - enum_annotation.pairs, - ), - ) - continue - if isinstance(parameter.type, NamedType): - continue - migrated_annotations.append( - EnumAnnotation( - parameter.id, - authors, - enum_annotation.reviewers, - get_migration_text(enum_annotation, mapping), - EnumReviewResult.UNSURE, - enum_annotation.enumName, - enum_annotation.pairs, - ), - ) - elif not isinstance(parameter, Attribute | Result): - migrated_annotations.append( - TodoAnnotation( - parameter.id, - authors, - enum_annotation.reviewers, - enum_annotation.comment, - EnumReviewResult.NONE, - get_migration_text(enum_annotation, mapping, for_todo_annotation=True), - ), - ) + ) return migrated_annotations diff --git a/src/library_analyzer/processing/migration/annotations/_migrate_expert_annotation.py b/src/library_analyzer/processing/migration/annotations/_migrate_expert_annotation.py index 3b9683bd..a95ae125 100644 --- a/src/library_analyzer/processing/migration/annotations/_migrate_expert_annotation.py +++ b/src/library_analyzer/processing/migration/annotations/_migrate_expert_annotation.py @@ -8,9 +8,7 @@ ) from library_analyzer.processing.api.model import Attribute, Result from library_analyzer.processing.migration.model import ( - ManyToOneMapping, Mapping, - OneToOneMapping, ) from ._constants import migration_author @@ -18,19 +16,12 @@ from ._get_migration_text import get_migration_text -def migrate_expert_annotation(expert_annotation: ExpertAnnotation, mapping: Mapping) -> list[AbstractAnnotation]: - expert_annotation = deepcopy(expert_annotation) +def migrate_expert_annotation(origin_annotation: ExpertAnnotation, mapping: Mapping) -> list[AbstractAnnotation]: + expert_annotation = deepcopy(origin_annotation) authors = 
expert_annotation.authors authors.append(migration_author) expert_annotation.authors = authors - if isinstance(mapping, ManyToOneMapping | OneToOneMapping): - element = mapping.get_apiv2_elements()[0] - if isinstance(element, Attribute | Result): - return [] - expert_annotation.target = element.id - return [expert_annotation] - annotated_apiv1_element = get_annotated_api_element(expert_annotation, mapping.get_apiv1_elements()) if annotated_apiv1_element is None: return [] diff --git a/src/library_analyzer/processing/migration/annotations/_migrate_group_annotation.py b/src/library_analyzer/processing/migration/annotations/_migrate_group_annotation.py index 25d9a4f4..d6e81abe 100644 --- a/src/library_analyzer/processing/migration/annotations/_migrate_group_annotation.py +++ b/src/library_analyzer/processing/migration/annotations/_migrate_group_annotation.py @@ -14,18 +14,17 @@ def migrate_group_annotation( - annotation: GroupAnnotation, + origin_annotation: GroupAnnotation, mapping: Mapping, mappings: list[Mapping], ) -> list[AbstractAnnotation]: - group_annotation = deepcopy(annotation) - authors = group_annotation.authors - authors.append(migration_author) - group_annotation.authors = authors - migrated_annotations: list[AbstractAnnotation] = [] for functionv2 in mapping.get_apiv2_elements(): + group_annotation = deepcopy(origin_annotation) + authors = group_annotation.authors + authors.append(migration_author) + group_annotation.authors = authors if isinstance(functionv2, Attribute | Result): continue if not isinstance(functionv2, Function): @@ -59,7 +58,7 @@ def migrate_group_annotation( ] grouped_parameters = remove_duplicates_and_preserve_order - if len(grouped_parameters) < 2 < len(group_annotation.parameters): + if len(grouped_parameters) < 2 <= len(group_annotation.parameters): migrated_annotations.append( TodoAnnotation( target=functionv2.id, @@ -95,17 +94,9 @@ def migrate_group_annotation( ), ) else: - migrated_annotations.append( - GroupAnnotation( - target=functionv2.id, - authors=authors, - reviewers=group_annotation.reviewers, - comment=group_annotation.comment, - reviewResult=EnumReviewResult.NONE, - groupName=group_annotation.groupName, - parameters=[parameter.name for parameter in grouped_parameters], - ), - ) + group_annotation.target = functionv2.id + group_annotation.parameters = [parameter.name for parameter in grouped_parameters] + migrated_annotations.append(group_annotation) return migrated_annotations diff --git a/src/library_analyzer/processing/migration/annotations/_migrate_move_annotation.py b/src/library_analyzer/processing/migration/annotations/_migrate_move_annotation.py index 17423c3e..842da9af 100644 --- a/src/library_analyzer/processing/migration/annotations/_migrate_move_annotation.py +++ b/src/library_analyzer/processing/migration/annotations/_migrate_move_annotation.py @@ -14,9 +14,7 @@ Result, ) from library_analyzer.processing.migration.model import ( - ManyToOneMapping, Mapping, - OneToOneMapping, ) from ._constants import migration_author @@ -52,42 +50,17 @@ def _was_moved( ) -def migrate_move_annotation(move_annotation: MoveAnnotation, mapping: Mapping) -> list[AbstractAnnotation]: - move_annotation = deepcopy(move_annotation) - authors = move_annotation.authors - authors.append(migration_author) - move_annotation.authors = authors - - if isinstance(mapping, ManyToOneMapping | OneToOneMapping): - element = mapping.get_apiv2_elements()[0] - if isinstance(element, Attribute | Result): - return [] - if not is_moveable(element): - return [ - 
TodoAnnotation( - element.id, - authors, - move_annotation.reviewers, - move_annotation.comment, - EnumReviewResult.NONE, - get_migration_text(move_annotation, mapping, for_todo_annotation=True), - ), - ] - if _was_moved( - get_annotated_api_element(move_annotation, mapping.get_apiv1_elements()), - element, - move_annotation, - ): - move_annotation.reviewResult = EnumReviewResult.UNSURE - move_annotation.target = element.id - return [move_annotation] - - annotated_apiv1_element = get_annotated_api_element(move_annotation, mapping.get_apiv1_elements()) +def migrate_move_annotation(origin_annotation: MoveAnnotation, mapping: Mapping) -> list[AbstractAnnotation]: + annotated_apiv1_element = get_annotated_api_element(origin_annotation, mapping.get_apiv1_elements()) if annotated_apiv1_element is None: return [] - move_annotations: list[AbstractAnnotation] = [] + migrated_annotations: list[AbstractAnnotation] = [] for element in mapping.get_apiv2_elements(): + move_annotation = deepcopy(origin_annotation) + authors = move_annotation.authors + authors.append(migration_author) + move_annotation.authors = authors if ( isinstance(element, type(annotated_apiv1_element)) and is_moveable(element) @@ -102,18 +75,11 @@ def migrate_move_annotation(move_annotation: MoveAnnotation, mapping: Mapping) - ) else EnumReviewResult.NONE ) - move_annotations.append( - MoveAnnotation( - element.id, - authors, - move_annotation.reviewers, - move_annotation.comment, - review_result, - move_annotation.destination, - ), - ) + move_annotation.target = element.id + move_annotation.reviewResult = review_result + migrated_annotations.append(move_annotation) elif not isinstance(element, Attribute | Result): - move_annotations.append( + migrated_annotations.append( TodoAnnotation( element.id, authors, @@ -123,4 +89,4 @@ def migrate_move_annotation(move_annotation: MoveAnnotation, mapping: Mapping) - get_migration_text(move_annotation, mapping, for_todo_annotation=True), ), ) - return move_annotations + return migrated_annotations diff --git a/src/library_analyzer/processing/migration/annotations/_migrate_remove_annotation.py b/src/library_analyzer/processing/migration/annotations/_migrate_remove_annotation.py index a6315f5e..345a2d29 100644 --- a/src/library_analyzer/processing/migration/annotations/_migrate_remove_annotation.py +++ b/src/library_analyzer/processing/migration/annotations/_migrate_remove_annotation.py @@ -14,9 +14,7 @@ Result, ) from library_analyzer.processing.migration.model import ( - ManyToOneMapping, Mapping, - OneToOneMapping, ) from ._constants import migration_author @@ -28,36 +26,17 @@ def is_removeable(element: Attribute | Class | Function | Parameter | Result) -> return isinstance(element, Class | Function) -def migrate_remove_annotation(remove_annotation: RemoveAnnotation, mapping: Mapping) -> list[AbstractAnnotation]: - remove_annotation = deepcopy(remove_annotation) - authors = remove_annotation.authors - authors.append(migration_author) - remove_annotation.authors = authors - - if isinstance(mapping, ManyToOneMapping | OneToOneMapping): - element = mapping.get_apiv2_elements()[0] - if isinstance(element, Attribute | Result): - return [] - if not is_removeable(element): - return [ - TodoAnnotation( - element.id, - authors, - remove_annotation.reviewers, - remove_annotation.comment, - EnumReviewResult.NONE, - get_migration_text(remove_annotation, mapping, for_todo_annotation=True), - ), - ] - remove_annotation.target = element.id - return [remove_annotation] - - annotated_apiv1_element = 
get_annotated_api_element(remove_annotation, mapping.get_apiv1_elements()) +def migrate_remove_annotation(origin_annotation: RemoveAnnotation, mapping: Mapping) -> list[AbstractAnnotation]: + annotated_apiv1_element = get_annotated_api_element(origin_annotation, mapping.get_apiv1_elements()) if annotated_apiv1_element is None: return [] remove_annotations: list[AbstractAnnotation] = [] for element in mapping.get_apiv2_elements(): + remove_annotation = deepcopy(origin_annotation) + authors = remove_annotation.authors + authors.append(migration_author) + remove_annotation.authors = authors if ( isinstance(element, type(annotated_apiv1_element)) and is_removeable(element) diff --git a/src/library_analyzer/processing/migration/annotations/_migrate_rename_annotation.py b/src/library_analyzer/processing/migration/annotations/_migrate_rename_annotation.py index a6c5aa09..b9f5b42d 100644 --- a/src/library_analyzer/processing/migration/annotations/_migrate_rename_annotation.py +++ b/src/library_analyzer/processing/migration/annotations/_migrate_rename_annotation.py @@ -8,9 +8,7 @@ ) from library_analyzer.processing.api.model import Attribute, Result from library_analyzer.processing.migration.model import ( - ManyToOneMapping, Mapping, - OneToOneMapping, ) from ._constants import migration_author @@ -18,52 +16,29 @@ from ._get_migration_text import get_migration_text -def migrate_rename_annotation(rename_annotation: RenameAnnotation, mapping: Mapping) -> list[AbstractAnnotation]: - rename_annotation = deepcopy(rename_annotation) - new_name = rename_annotation.newName - authors = rename_annotation.authors - authors.append(migration_author) - rename_annotation.authors = authors - - if isinstance(mapping, ManyToOneMapping | OneToOneMapping): - element = mapping.get_apiv2_elements()[0] - if isinstance(element, Attribute | Result): - return [] - rename_annotation.target = element.id - return [rename_annotation] - - annotated_apiv1_element = get_annotated_api_element(rename_annotation, mapping.get_apiv1_elements()) +def migrate_rename_annotation(origin_annotation: RenameAnnotation, mapping: Mapping) -> list[AbstractAnnotation]: + annotated_apiv1_element = get_annotated_api_element(origin_annotation, mapping.get_apiv1_elements()) if annotated_apiv1_element is None: return [] annotations: list[AbstractAnnotation] = [] for element in mapping.get_apiv2_elements(): + rename_annotation = deepcopy(origin_annotation) + authors = rename_annotation.authors + authors.append(migration_author) + rename_annotation.authors = authors if isinstance(element, type(annotated_apiv1_element)) and not isinstance(element, Attribute | Result): if element.name not in ( - new_name, - rename_annotation.target.split(".")[-1], + origin_annotation.newName, + rename_annotation.target.split("/")[-1], ): - annotations.append( - RenameAnnotation( - element.id, - authors, - rename_annotation.reviewers, - get_migration_text(rename_annotation, mapping), - EnumReviewResult.UNSURE, - rename_annotation.newName, - ), - ) + rename_annotation.comment = get_migration_text(rename_annotation, mapping) + rename_annotation.reviewResult = EnumReviewResult.UNSURE + rename_annotation.target = element.id + annotations.append(rename_annotation) else: - annotations.append( - RenameAnnotation( - element.id, - authors, - rename_annotation.reviewers, - rename_annotation.comment, - EnumReviewResult.NONE, - rename_annotation.newName, - ), - ) + rename_annotation.target = element.id + annotations.append(rename_annotation) elif not isinstance(element, Attribute | 
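The rename migration above switches from `.split(".")` to `.split("/")` when recovering an element's name from its target ID. That matters because IDs in this code base are slash-separated paths (compare the `class_.id + "/" + attribute.name` construction removed earlier in this diff); with a hypothetical ID:

```python
target = "sklearn/sklearn.cluster/KMeans/fit/sample_weight"
assert target.split("/")[-1] == "sample_weight"  # last path segment: the name
assert target.split(".")[-1] == "cluster/KMeans/fit/sample_weight"  # wrong cut
```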
Result): annotations.append( TodoAnnotation( diff --git a/src/library_analyzer/processing/migration/annotations/_migrate_todo_annotation.py b/src/library_analyzer/processing/migration/annotations/_migrate_todo_annotation.py index dd83ef4b..cdb25352 100644 --- a/src/library_analyzer/processing/migration/annotations/_migrate_todo_annotation.py +++ b/src/library_analyzer/processing/migration/annotations/_migrate_todo_annotation.py @@ -7,9 +7,7 @@ ) from library_analyzer.processing.api.model import Attribute, Result from library_analyzer.processing.migration.model import ( - ManyToOneMapping, Mapping, - OneToOneMapping, ) from ._constants import migration_author @@ -17,25 +15,17 @@ from ._get_migration_text import get_migration_text -def migrate_todo_annotation(todo_annotation: TodoAnnotation, mapping: Mapping) -> list[AbstractAnnotation]: - todo_annotation = deepcopy(todo_annotation) - authors = todo_annotation.authors - authors.append(migration_author) - todo_annotation.authors = authors - - if isinstance(mapping, ManyToOneMapping | OneToOneMapping): - element = mapping.get_apiv2_elements()[0] - if isinstance(element, Attribute | Result): - return [] - todo_annotation.target = element.id - return [todo_annotation] - - annotated_apiv1_element = get_annotated_api_element(todo_annotation, mapping.get_apiv1_elements()) +def migrate_todo_annotation(origin_annotation: TodoAnnotation, mapping: Mapping) -> list[AbstractAnnotation]: + annotated_apiv1_element = get_annotated_api_element(origin_annotation, mapping.get_apiv1_elements()) if annotated_apiv1_element is None: return [] todo_annotations: list[AbstractAnnotation] = [] for element in mapping.get_apiv2_elements(): + todo_annotation = deepcopy(origin_annotation) + authors = todo_annotation.authors + authors.append(migration_author) + todo_annotation.authors = authors if isinstance(element, type(annotated_apiv1_element)) and not isinstance(element, Attribute | Result): todo_annotations.append( TodoAnnotation( @@ -54,7 +44,7 @@ def migrate_todo_annotation(todo_annotation: TodoAnnotation, mapping: Mapping) - authors, todo_annotation.reviewers, todo_annotation.comment, - EnumReviewResult.NONE, + EnumReviewResult.UNSURE, get_migration_text(todo_annotation, mapping, for_todo_annotation=True), ), ) diff --git a/src/library_analyzer/processing/migration/annotations/_migrate_value_annotation.py b/src/library_analyzer/processing/migration/annotations/_migrate_value_annotation.py index d8993a1d..558fa0b0 100644 --- a/src/library_analyzer/processing/migration/annotations/_migrate_value_annotation.py +++ b/src/library_analyzer/processing/migration/annotations/_migrate_value_annotation.py @@ -20,11 +20,7 @@ UnionType, ) from library_analyzer.processing.migration.model import ( - ManyToManyMapping, - ManyToOneMapping, Mapping, - OneToManyMapping, - OneToOneMapping, ) from ._constants import migration_author @@ -32,80 +28,45 @@ from ._get_migration_text import get_migration_text -def migrate_value_annotation(annotation: ValueAnnotation, mapping: Mapping) -> list[AbstractAnnotation]: - value_annotation = deepcopy(annotation) - authors = value_annotation.authors - authors.append(migration_author) - value_annotation.authors = authors - - if isinstance(mapping, OneToOneMapping | ManyToOneMapping): - parameter = mapping.get_apiv2_elements()[0] - if isinstance(parameter, Attribute | Result): - return [] +def migrate_value_annotation(origin_annotation: ValueAnnotation, mapping: Mapping) -> list[AbstractAnnotation]: + migrated_annotations: list[AbstractAnnotation] = [] 
+ for parameter in mapping.get_apiv2_elements(): + value_annotation = deepcopy(origin_annotation) + authors = value_annotation.authors + authors.append(migration_author) + value_annotation.authors = authors if isinstance(parameter, Parameter): if isinstance(value_annotation, ConstantAnnotation): migrated_constant_annotation = migrate_constant_annotation(value_annotation, parameter, mapping) if migrated_constant_annotation is not None: - return [migrated_constant_annotation] + migrated_annotations.append(migrated_constant_annotation) + continue if isinstance(value_annotation, OmittedAnnotation): migrated_omitted_annotation = migrate_omitted_annotation(value_annotation, parameter, mapping) if migrated_omitted_annotation is not None: - return [migrated_omitted_annotation] + migrated_annotations.append(migrated_omitted_annotation) + continue if isinstance(value_annotation, OptionalAnnotation): migrated_optional_annotation = migrate_optional_annotation(value_annotation, parameter, mapping) if migrated_optional_annotation is not None: - return [migrated_optional_annotation] + migrated_annotations.append(migrated_optional_annotation) + continue if isinstance(value_annotation, RequiredAnnotation): migrated_required_annotation = migrate_required_annotation(value_annotation, parameter, mapping) if migrated_required_annotation is not None: - return [migrated_required_annotation] - return [ - TodoAnnotation( - parameter.id, - authors, - value_annotation.reviewers, - value_annotation.comment, - EnumReviewResult.NONE, - get_migration_text(value_annotation, mapping), - ), - ] - migrated_annotations: list[AbstractAnnotation] = [] - if isinstance(mapping, OneToManyMapping | ManyToManyMapping): - for parameter in mapping.get_apiv2_elements(): - if isinstance(parameter, Result | Attribute): - continue - if isinstance(parameter, Parameter): - if isinstance(value_annotation, ConstantAnnotation): - migrated_constant_annotation = migrate_constant_annotation(value_annotation, parameter, mapping) - if migrated_constant_annotation is not None: - migrated_annotations.append(migrated_constant_annotation) - continue - elif isinstance(value_annotation, OmittedAnnotation): - migrated_omitted_annotation = migrate_omitted_annotation(value_annotation, parameter, mapping) - if migrated_omitted_annotation is not None: - migrated_annotations.append(migrated_omitted_annotation) - continue - elif isinstance(value_annotation, OptionalAnnotation): - migrated_optional_annotation = migrate_optional_annotation(value_annotation, parameter, mapping) - if migrated_optional_annotation is not None: - migrated_annotations.append(migrated_optional_annotation) - continue - elif isinstance(value_annotation, RequiredAnnotation): - migrated_required_annotation = migrate_required_annotation(value_annotation, parameter, mapping) - if migrated_required_annotation is not None: - migrated_annotations.append(migrated_required_annotation) - continue - if not isinstance(parameter, Attribute | Result): - migrated_annotations.append( - TodoAnnotation( - parameter.id, - authors, - value_annotation.reviewers, - value_annotation.comment, - EnumReviewResult.NONE, - get_migration_text(value_annotation, mapping), - ), - ) + migrated_annotations.append(migrated_required_annotation) + continue + if not isinstance(parameter, Attribute | Result): + migrated_annotations.append( + TodoAnnotation( + parameter.id, + authors, + value_annotation.reviewers, + value_annotation.comment, + EnumReviewResult.NONE, + get_migration_text(value_annotation, mapping), + ), + ) 
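The flattened loop above still selects a migration helper with an `isinstance` chain over the four `ValueAnnotation` subtypes and falls back to a `TodoAnnotation`. An equivalent table-driven sketch of that dispatch (illustration only; it uses exact types, which is subtly stricter than `isinstance`, and relies on the imports already present in this module):

```python
MIGRATORS = {
    ConstantAnnotation: migrate_constant_annotation,
    OmittedAnnotation: migrate_omitted_annotation,
    OptionalAnnotation: migrate_optional_annotation,
    RequiredAnnotation: migrate_required_annotation,
}

migrator = MIGRATORS.get(type(value_annotation))
migrated = migrator(value_annotation, parameter, mapping) if migrator else None
```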
return migrated_annotations @@ -242,7 +203,9 @@ def migrate_omitted_annotation( parameterv1 = get_annotated_api_element_by_type(omitted_annotation, mapping.get_apiv1_elements(), Parameter) if parameterv1 is None: return None - if _have_same_type(parameterv1.type, parameterv2.type) and _have_same_value( + if not _have_same_type(parameterv1.type, parameterv2.type): + return None + if _have_same_value( parameterv1.default_value, parameterv2.default_value, ): @@ -253,19 +216,13 @@ def migrate_omitted_annotation( omitted_annotation.comment, EnumReviewResult.NONE, ) - if _have_same_type(parameterv1.type, parameterv2.type) and not _have_same_value( - parameterv1.default_value, - parameterv2.default_value, - ): - return OmittedAnnotation( - parameterv2.id, - omitted_annotation.authors, - omitted_annotation.reviewers, - get_migration_text(omitted_annotation, mapping), - EnumReviewResult.UNSURE, - ) - - return None + return OmittedAnnotation( + parameterv2.id, + omitted_annotation.authors, + omitted_annotation.reviewers, + get_migration_text(omitted_annotation, mapping), + EnumReviewResult.UNSURE, + ) def migrate_optional_annotation( diff --git a/src/library_analyzer/processing/migration/model/_api_mapping.py b/src/library_analyzer/processing/migration/model/_api_mapping.py index eaf12f87..9b600685 100644 --- a/src/library_analyzer/processing/migration/model/_api_mapping.py +++ b/src/library_analyzer/processing/migration/model/_api_mapping.py @@ -1,5 +1,5 @@ from collections.abc import Callable -from typing import TypeVar, Union +from typing import TypeVar from library_analyzer.processing.api.model import ( API, @@ -13,14 +13,13 @@ from ._differ import AbstractDiffer from ._mapping import Mapping, OneToOneMapping, merge_mappings -api_element = Union[Attribute, Class, Function, Parameter, Result] +api_element = Attribute | Class | Function | Parameter | Result API_ELEMENTS = TypeVar("API_ELEMENTS", Attribute, Class, Function, Parameter, Result) class APIMapping: threshold_of_similarity_between_mappings: float threshold_of_similarity_for_creation_of_mappings: float - threshold_of_merging_mappings: float apiv1: API apiv2: API differ: AbstractDiffer @@ -32,14 +31,12 @@ def __init__( differ: AbstractDiffer, threshold_of_similarity_for_creation_of_mappings: float = 0.5, threshold_of_similarity_between_mappings: float = 0.05, - threshold_of_merging_mappings: float = 0.3, ) -> None: self.apiv1 = apiv1 self.apiv2 = apiv2 self.differ = differ self.threshold_of_similarity_for_creation_of_mappings = threshold_of_similarity_for_creation_of_mappings self.threshold_of_similarity_between_mappings = threshold_of_similarity_between_mappings - self.threshold_of_merging_mappings = threshold_of_merging_mappings def _get_mappings_for_api_elements( self, @@ -66,17 +63,7 @@ def map_api(self) -> list[Mapping]: if related_mappings is not None: for mapping in related_mappings: new_mapping = None - if isinstance(mapping.get_apiv1_elements()[0], Attribute) and isinstance( - mapping.get_apiv2_elements()[0], - Attribute, - ): - new_mapping = self._get_mappings_for_api_elements( - [element for element in mapping.get_apiv1_elements() if isinstance(element, Attribute)], - [element for element in mapping.get_apiv2_elements() if isinstance(element, Attribute)], - self.differ.compute_attribute_similarity, - ) - mappings.extend(new_mapping) - elif isinstance(mapping.get_apiv1_elements()[0], Class) and isinstance( + if isinstance(mapping.get_apiv1_elements()[0], Class) and isinstance( mapping.get_apiv2_elements()[0], Class, ): @@ -106,16 
+93,7 @@ def map_api(self) -> list[Mapping]: self.differ.compute_parameter_similarity, ) mappings.extend(new_mapping) - elif isinstance(mapping.get_apiv1_elements()[0], Result) and isinstance( - mapping.get_apiv2_elements()[0], - Result, - ): - new_mapping = self._get_mappings_for_api_elements( - [element for element in mapping.get_apiv1_elements() if isinstance(element, Result)], - [element for element in mapping.get_apiv2_elements() if isinstance(element, Result)], - self.differ.compute_result_similarity, - ) - mappings.extend(new_mapping) + # Attribute and Result could be added here if new_mapping is not None and len(new_mapping) > 0: self.differ.notify_new_mapping(new_mapping) else: @@ -140,22 +118,7 @@ def map_api(self) -> list[Mapping]: self.differ.compute_parameter_similarity, ), ) - - mappings.extend( - self._get_mappings_for_api_elements( - [attribute for class_ in self.apiv1.classes.values() for attribute in class_.instance_attributes], - [attribute for class_ in self.apiv2.classes.values() for attribute in class_.instance_attributes], - self.differ.compute_attribute_similarity, - ), - ) - - mappings.extend( - self._get_mappings_for_api_elements( - [result for function in self.apiv1.functions.values() for result in function.results], - [result for function in self.apiv2.functions.values() for result in function.results], - self.differ.compute_result_similarity, - ), - ) + # Attribute and Result could be added here mappings.extend(self.differ.get_additional_mappings()) mappings.sort(key=Mapping.get_similarity, reverse=True) return mappings diff --git a/src/library_analyzer/processing/migration/model/_differ.py b/src/library_analyzer/processing/migration/model/_differ.py index 0db2a85c..4f0393c0 100644 --- a/src/library_analyzer/processing/migration/model/_differ.py +++ b/src/library_analyzer/processing/migration/model/_differ.py @@ -3,7 +3,7 @@ import re from abc import ABC, abstractmethod from dataclasses import dataclass -from typing import TYPE_CHECKING, TypeVar, Union +from typing import TYPE_CHECKING, TypeVar from Levenshtein import distance @@ -12,24 +12,24 @@ AbstractType, Attribute, Class, - ClassDocumentation, + ClassDocstring, Function, - FunctionDocumentation, + FunctionDocstring, Parameter, ParameterAssignment, - ParameterDocumentation, + ParameterDocstring, Result, UnionType, ) -from ._get_not_mapped_api_elements import _get_not_mapped_api_elements +from ._get_unmapped_api_elements import _get_unmapped_api_elements if TYPE_CHECKING: from collections.abc import Callable, Sequence from ._mapping import Mapping -api_element = Union[Attribute, Class, Function, Parameter, Result] +api_element = Attribute | Class | Function | Parameter | Result @dataclass @@ -228,63 +228,67 @@ def __init__( apiv2: API, ) -> None: super().__init__(previous_base_differ, previous_mappings, apiv1, apiv2) - self.related_mappings = _get_not_mapped_api_elements(self.previous_mappings, self.apiv1, self.apiv2) + self.related_mappings = _get_unmapped_api_elements(self.previous_mappings, self.apiv1, self.apiv2) distance_between_implicit_and_explicit = 0.3 distance_between_vararg_and_normal = 0.3 distance_between_position_and_named = 0.3 - distance_between_both_to_one = 0.15 - distance_between_one_to_both = 0.15 + distance_between_both_to_one = distance_between_position_and_named / 2 + distance_between_one_to_both = distance_between_position_and_named / 2 self.assigned_by_look_up_similarity = { ParameterAssignment.IMPLICIT: { ParameterAssignment.IMPLICIT: 1.0, - ParameterAssignment.NAMED_VARARG: 1.0 - 
- distance_between_implicit_and_explicit - - distance_between_vararg_and_normal - - distance_between_position_and_named, - ParameterAssignment.POSITIONAL_VARARG: 1.0 - - distance_between_implicit_and_explicit - - distance_between_vararg_and_normal, + ParameterAssignment.NAMED_VARARG: ( + 1.0 + - distance_between_implicit_and_explicit + - distance_between_vararg_and_normal + - distance_between_position_and_named + ), + ParameterAssignment.POSITIONAL_VARARG: ( + 1.0 - distance_between_implicit_and_explicit - distance_between_vararg_and_normal + ), ParameterAssignment.POSITION_OR_NAME: 1.0 - distance_between_implicit_and_explicit, ParameterAssignment.NAME_ONLY: 1.0 - distance_between_implicit_and_explicit, ParameterAssignment.POSITION_ONLY: 1.0 - distance_between_implicit_and_explicit, }, ParameterAssignment.NAMED_VARARG: { - ParameterAssignment.IMPLICIT: 1.0 - - distance_between_implicit_and_explicit - - distance_between_vararg_and_normal - - distance_between_position_and_named, + ParameterAssignment.IMPLICIT: ( + 1.0 + - distance_between_implicit_and_explicit + - distance_between_vararg_and_normal + - distance_between_position_and_named + ), ParameterAssignment.NAMED_VARARG: 1.0, ParameterAssignment.POSITIONAL_VARARG: 1.0 - distance_between_position_and_named, - ParameterAssignment.POSITION_OR_NAME: 1.0 - - distance_between_vararg_and_normal - - distance_between_one_to_both, + ParameterAssignment.POSITION_OR_NAME: ( + 1.0 - distance_between_vararg_and_normal - distance_between_one_to_both + ), ParameterAssignment.NAME_ONLY: 1.0 - distance_between_vararg_and_normal, - ParameterAssignment.POSITION_ONLY: 1.0 - - distance_between_vararg_and_normal - - distance_between_position_and_named, + ParameterAssignment.POSITION_ONLY: ( + 1.0 - distance_between_vararg_and_normal - distance_between_position_and_named + ), }, ParameterAssignment.POSITIONAL_VARARG: { - ParameterAssignment.IMPLICIT: 1.0 - - distance_between_implicit_and_explicit - - distance_between_vararg_and_normal, + ParameterAssignment.IMPLICIT: ( + 1.0 - distance_between_implicit_and_explicit - distance_between_vararg_and_normal + ), ParameterAssignment.NAMED_VARARG: 1.0 - distance_between_position_and_named, ParameterAssignment.POSITIONAL_VARARG: 1.0, - ParameterAssignment.POSITION_OR_NAME: 1.0 - - distance_between_vararg_and_normal - - distance_between_one_to_both, - ParameterAssignment.NAME_ONLY: 1.0 - - distance_between_vararg_and_normal - - distance_between_position_and_named, + ParameterAssignment.POSITION_OR_NAME: ( + 1.0 - distance_between_vararg_and_normal - distance_between_one_to_both + ), + ParameterAssignment.NAME_ONLY: ( + 1.0 - distance_between_vararg_and_normal - distance_between_position_and_named + ), ParameterAssignment.POSITION_ONLY: 1.0 - distance_between_vararg_and_normal, }, ParameterAssignment.POSITION_OR_NAME: { ParameterAssignment.IMPLICIT: 1.0 - distance_between_implicit_and_explicit, - ParameterAssignment.NAMED_VARARG: 1.0 - - distance_between_vararg_and_normal - - distance_between_both_to_one, - ParameterAssignment.POSITIONAL_VARARG: 1.0 - - distance_between_vararg_and_normal - - distance_between_both_to_one, + ParameterAssignment.NAMED_VARARG: ( + 1.0 - distance_between_vararg_and_normal - distance_between_both_to_one + ), + ParameterAssignment.POSITIONAL_VARARG: ( + 1.0 - distance_between_vararg_and_normal - distance_between_both_to_one + ), ParameterAssignment.POSITION_OR_NAME: 1.0, ParameterAssignment.NAME_ONLY: 1.0 - distance_between_both_to_one, ParameterAssignment.POSITION_ONLY: 1.0 - 
distance_between_both_to_one, @@ -292,18 +296,18 @@ def __init__( ParameterAssignment.NAME_ONLY: { ParameterAssignment.IMPLICIT: 1.0 - distance_between_implicit_and_explicit, ParameterAssignment.NAMED_VARARG: 1.0 - distance_between_vararg_and_normal, - ParameterAssignment.POSITIONAL_VARARG: 1.0 - - distance_between_vararg_and_normal - - distance_between_position_and_named, + ParameterAssignment.POSITIONAL_VARARG: ( + 1.0 - distance_between_vararg_and_normal - distance_between_position_and_named + ), ParameterAssignment.POSITION_OR_NAME: 1.0 - distance_between_one_to_both, ParameterAssignment.NAME_ONLY: 1.0, ParameterAssignment.POSITION_ONLY: 1.0 - distance_between_position_and_named, }, ParameterAssignment.POSITION_ONLY: { ParameterAssignment.IMPLICIT: 1.0 - distance_between_implicit_and_explicit, - ParameterAssignment.NAMED_VARARG: 1.0 - - distance_between_vararg_and_normal - - distance_between_position_and_named, + ParameterAssignment.NAMED_VARARG: ( + 1.0 - distance_between_vararg_and_normal - distance_between_position_and_named + ), ParameterAssignment.POSITIONAL_VARARG: 1.0 - distance_between_vararg_and_normal, ParameterAssignment.POSITION_OR_NAME: 1.0 - distance_between_one_to_both, ParameterAssignment.NAME_ONLY: 1.0 - distance_between_position_and_named, @@ -348,7 +352,7 @@ class from apiv2 id_similarity = self._compute_id_similarity(classv1.id, classv2.id) - documentation_similarity = self._compute_documentation_similarity(classv1.documentation, classv2.documentation) + documentation_similarity = self._compute_documentation_similarity(classv1.docstring, classv2.docstring) if documentation_similarity < 0: documentation_similarity = 0 normalize_similarity -= 1 @@ -433,8 +437,8 @@ def compute_function_similarity(self, functionv1: Function, functionv2: Function id_similarity = self._compute_id_similarity(functionv1.id, functionv2.id) documentation_similarity = self._compute_documentation_similarity( - functionv1.documentation, - functionv2.documentation, + functionv1.docstring, + functionv2.docstring, ) if documentation_similarity < 0: documentation_similarity = 0 @@ -504,7 +508,6 @@ def compute_parameter_similarity(self, parameterv1: Parameter, parameterv2: Para ) if parameter_assignment_similarity < 0: parameter_assignment_similarity = 0 - normalize_similarity -= 1 parameter_default_value_similarity = self._compute_default_value_similarity( parameterv1.default_value, parameterv2.default_value, @@ -513,8 +516,8 @@ def compute_parameter_similarity(self, parameterv1: Parameter, parameterv2: Para parameter_default_value_similarity = 0 normalize_similarity -= 1 parameter_documentation_similarity = self._compute_documentation_similarity( - parameterv1.documentation, - parameterv2.documentation, + parameterv1.docstring, + parameterv2.docstring, ) if parameter_documentation_similarity < 0: parameter_documentation_similarity = 0 @@ -635,8 +638,8 @@ def _compute_default_value_similarity( def _compute_documentation_similarity( self, - documentationv1: ClassDocumentation | FunctionDocumentation | ParameterDocumentation, - documentationv2: ClassDocumentation | FunctionDocumentation | ParameterDocumentation, + documentationv1: ClassDocstring | FunctionDocstring | ParameterDocstring, + documentationv2: ClassDocstring | FunctionDocstring | ParameterDocstring, ) -> float: if len(documentationv1.description) == len(documentationv2.description) == 0: return -1.0 diff --git a/src/library_analyzer/processing/migration/model/_get_not_mapped_api_elements.py 
b/src/library_analyzer/processing/migration/model/_get_unmapped_api_elements.py similarity index 68% rename from src/library_analyzer/processing/migration/model/_get_not_mapped_api_elements.py rename to src/library_analyzer/processing/migration/model/_get_unmapped_api_elements.py index 305ffffc..4c7bb16e 100644 --- a/src/library_analyzer/processing/migration/model/_get_not_mapped_api_elements.py +++ b/src/library_analyzer/processing/migration/model/_get_unmapped_api_elements.py @@ -15,7 +15,7 @@ api_element = Union[Attribute, Class, Function, Parameter, Result] -def _get_not_mapped_api_elements(previous_mappings: list[Mapping], apiv1: API, apiv2: API) -> list[Mapping]: +def _get_unmapped_api_elements(previous_mappings: list[Mapping], apiv1: API, apiv2: API) -> list[Mapping]: related_mappings = [] mapped_apiv1_elements = [element for mapping in previous_mappings for element in mapping.get_apiv1_elements()] mapped_apiv2_elements = [element for mapping in previous_mappings for element in mapping.get_apiv2_elements()] @@ -26,33 +26,33 @@ def _get_not_mapped_api_elements(previous_mappings: list[Mapping], apiv1: API, a lambda api: api.parameters().values(), lambda api: api.results().values(), ]: - not_mapped_elements_mapping = _get_not_mapped_api_elements_for_type( + unmapped_elements_mapping = _get_unmapped_api_elements_for_type( apiv1, apiv2, mapped_apiv1_elements, mapped_apiv2_elements, get_api_element_for_type, ) - if not_mapped_elements_mapping is not None: - related_mappings.append(not_mapped_elements_mapping) + if unmapped_elements_mapping is not None: + related_mappings.append(unmapped_elements_mapping) return related_mappings -def _get_not_mapped_api_elements_for_type( +def _get_unmapped_api_elements_for_type( apiv1: API, apiv2: API, mapped_apiv1_elements: list[api_element], mapped_apiv2_elements: list[api_element], get_api_element_for_type: Callable[[API], list[api_element]], ) -> Mapping | None: - not_mapped_v1_elements = [] + unmapped_v1_elements = [] for api_elementv1 in get_api_element_for_type(apiv1): if api_elementv1 not in mapped_apiv1_elements: - not_mapped_v1_elements.append(api_elementv1) - not_mapped_v2_elements = [] + unmapped_v1_elements.append(api_elementv1) + unmapped_v2_elements = [] for api_elementv2 in get_api_element_for_type(apiv2): if api_elementv2 not in mapped_apiv2_elements: - not_mapped_v2_elements.append(api_elementv2) - if len(not_mapped_v1_elements) > 0 and len(not_mapped_v2_elements) > 0: - return ManyToManyMapping(-1.0, not_mapped_v1_elements, not_mapped_v2_elements) + unmapped_v2_elements.append(api_elementv2) + if len(unmapped_v1_elements) > 0 and len(unmapped_v2_elements) > 0: + return ManyToManyMapping(-1.0, unmapped_v1_elements, unmapped_v2_elements) return None diff --git a/src/library_analyzer/processing/migration/model/_inheritance_differ.py b/src/library_analyzer/processing/migration/model/_inheritance_differ.py index 816476b6..c9763673 100644 --- a/src/library_analyzer/processing/migration/model/_inheritance_differ.py +++ b/src/library_analyzer/processing/migration/model/_inheritance_differ.py @@ -10,7 +10,7 @@ ) from ._differ import AbstractDiffer -from ._get_not_mapped_api_elements import _get_not_mapped_api_elements +from ._get_unmapped_api_elements import _get_unmapped_api_elements from ._mapping import Mapping api_element = Union[Attribute, Class, Function, Parameter, Result] @@ -35,7 +35,7 @@ def __init__( self.boost_value = boost_value self.inheritance = {} self.new_mappings = [] - self.related_mappings = 
_get_not_mapped_api_elements(self.previous_mappings, self.apiv1, self.apiv2) + self.related_mappings = _get_unmapped_api_elements(self.previous_mappings, self.apiv1, self.apiv2) for class_v2 in self.apiv2.classes.values(): additional_v1_elements = [] for mapping in previous_mappings: diff --git a/src/library_analyzer/processing/migration/model/_strict_differ.py b/src/library_analyzer/processing/migration/model/_strict_differ.py index b95265dc..d73a07ea 100644 --- a/src/library_analyzer/processing/migration/model/_strict_differ.py +++ b/src/library_analyzer/processing/migration/model/_strict_differ.py @@ -1,4 +1,4 @@ -from typing import TypeVar, Union +from typing import TypeVar from library_analyzer.processing.api.model import ( API, @@ -10,10 +10,10 @@ ) from ._differ import AbstractDiffer -from ._mapping import Mapping +from ._mapping import Mapping, OneToOneMapping DEPENDENT_API_ELEMENTS = TypeVar("DEPENDENT_API_ELEMENTS", Function, Attribute, Parameter, Result) -api_element = Union[Attribute, Class, Function, Parameter, Result] +api_element = Attribute | Class | Function | Parameter | Result class StrictDiffer(AbstractDiffer): @@ -54,7 +54,15 @@ def __init__( self.previous_mappings, key=lambda mapping: sort_order[type(mapping.get_apiv1_elements()[0])], ) - self.related_mappings = [mapping for mapping in self.related_mappings if mapping not in unchanged_mappings] + self.related_mappings = [ + mapping + for mapping in self.related_mappings + if mapping not in unchanged_mappings and not isinstance(mapping, OneToOneMapping) + ] + for mapping_list in [self.previous_mappings, unchanged_mappings]: + for mapping in mapping_list: + if mapping not in self.related_mappings: + self.new_mappings[type(mapping.get_apiv1_elements()[0])].append(mapping) self.unchanged_mappings = unchanged_mappings def get_related_mappings( diff --git a/src/library_analyzer/processing/migration/model/_unchanged_differ.py b/src/library_analyzer/processing/migration/model/_unchanged_differ.py index dc64bd94..9680d5f7 100644 --- a/src/library_analyzer/processing/migration/model/_unchanged_differ.py +++ b/src/library_analyzer/processing/migration/model/_unchanged_differ.py @@ -26,12 +26,12 @@ def __init__( self.unchanged_api_mappings: list[Mapping] = [] for classv1 in apiv1.classes.values(): classv2 = apiv2.classes.get(classv1.id, None) - if classv2 is not None and self.have_same_api(classv1, classv2): + if classv2 is not None: self.unchanged_api_mappings.append(OneToOneMapping(1.0, classv1, classv2)) for functionv1 in apiv1.functions.values(): functionv2 = apiv2.functions.get(functionv1.id, None) - if functionv2 is not None and self.have_same_api(functionv1, functionv2): + if functionv2 is not None: self.unchanged_api_mappings.append(OneToOneMapping(1.0, functionv1, functionv2)) for parameterv1 in apiv1.parameters().values(): @@ -48,6 +48,9 @@ def __init__( resultv2 = apiv2.results().get(f"{resultv1.function_id}/{resultv1.name}", None) if resultv2 is not None and self.have_same_api(resultv1, resultv2): self.unchanged_api_mappings.append(OneToOneMapping(1.0, resultv1, resultv2)) + if parameterv2 is not None: + self.unchanged_api_mappings.append(OneToOneMapping(-1.0, parameterv1, parameterv2)) + # Attribute and Result could be added here API_ELEMENTS = TypeVar("API_ELEMENTS", Attribute, Class, Function, Parameter, Result) @@ -76,7 +79,7 @@ def compute_attribute_similarity(self, attributev1: Attribute, attributev2: Attr """ return 0.0 - def compute_class_similarity(self, classv1: Class, classv2: Class) -> float: # noqa: 
ARG002 + def compute_class_similarity(self, classv1: Class, classv2: Class) -> float: """ Compute the similarity between classes from apiv1 and apiv2. @@ -92,9 +95,11 @@ class from apiv2 similarity : float value between 0 and 1, where 1 means that the elements are equal. """ + if self.have_same_api(classv1, classv2): + return 1.0 return 0.0 - def compute_function_similarity(self, functionv1: Function, functionv2: Function) -> float: # noqa: ARG002 + def compute_function_similarity(self, functionv1: Function, functionv2: Function) -> float: """ Compute the similarity between functions from apiv1 and apiv2. @@ -110,9 +115,11 @@ def compute_function_similarity(self, functionv1: Function, functionv2: Function similarity : float value between 0 and 1, where 1 means that the elements are equal. """ + if self.have_same_api(functionv1, functionv2): + return 1.0 return 0.0 - def compute_parameter_similarity(self, parameterv1: Parameter, parameterv2: Parameter) -> float: # noqa: ARG002 + def compute_parameter_similarity(self, parameterv1: Parameter, parameterv2: Parameter) -> float: """ Compute similarity between parameters from apiv1 and apiv2. @@ -128,6 +135,8 @@ def compute_parameter_similarity(self, parameterv1: Parameter, parameterv2: Para similarity : float value between 0 and 1, where 1 means that the elements are equal. """ + if self.have_same_api(parameterv1, parameterv2): + return 1.0 return 0.0 def compute_result_similarity(self, resultv1: Result, resultv2: Result) -> float: # noqa: ARG002 @@ -157,7 +166,7 @@ def get_related_mappings(self) -> list[Mapping] | None: mappings : list[Mapping] | None a list of Mappings if only previously mapped api elements should be mapped to each other or else None. """ - return [] + return self.unchanged_api_mappings def notify_new_mapping(self, mappings: list[Mapping]) -> None: """ @@ -180,4 +189,4 @@ def get_additional_mappings(self) -> list[Mapping]: mappings : list[Mapping] additional mappings that should be included in the result of the differentiation. 
""" - return self.unchanged_api_mappings + return [] diff --git a/src/library_analyzer/processing/usages/model/_usages.py b/src/library_analyzer/processing/usages/model/_usages.py index 5bb681cb..b72ab4e8 100644 --- a/src/library_analyzer/processing/usages/model/_usages.py +++ b/src/library_analyzer/processing/usages/model/_usages.py @@ -1,7 +1,13 @@ from __future__ import annotations +import json from collections import Counter -from typing import Any +from typing import TYPE_CHECKING, Any + +from library_analyzer.utils import ensure_file_exists + +if TYPE_CHECKING: + from pathlib import Path USAGES_SCHEMA_VERSION = 1 @@ -15,27 +21,34 @@ class UsageCountStore: """Count how often classes, functions, parameters, and parameter values are used.""" @staticmethod - def from_json(json: Any) -> UsageCountStore: + def from_json_file(path: Path) -> UsageCountStore: + with path.open(encoding="utf-8") as usages_file: + usages_json = json.load(usages_file) + + return UsageCountStore.from_dict(usages_json) + + @staticmethod + def from_dict(d: dict[str, Any]) -> UsageCountStore: """Create an instance of this class from a dictionary.""" result = UsageCountStore() # Revive class counts - class_counts = json["class_counts"] + class_counts = d["class_counts"] for class_id, count in class_counts.items(): result.add_class_usages(class_id, count) # Revive function counts - function_counts = json["function_counts"] + function_counts = d["function_counts"] for function_id, count in function_counts.items(): result.add_function_usages(function_id, count) # Revive parameter counts - parameter_counts = json["parameter_counts"] + parameter_counts = d["parameter_counts"] for parameter_id, count in parameter_counts.items(): result.add_parameter_usages(parameter_id, count) # Revive value counts - value_counts = json["value_counts"] + value_counts = d["value_counts"] for parameter_id, values in value_counts.items(): for value, count in values.items(): result.add_value_usages(parameter_id, value, count) @@ -166,7 +179,12 @@ def merge_other_into_self(self, other_usage_store: UsageCountStore) -> UsageCoun return self - def to_json(self) -> Any: + def to_json_file(self, path: Path) -> None: + ensure_file_exists(path) + with path.open("w") as f: + json.dump(self.to_dict(), f, indent=2) + + def to_dict(self) -> dict[str, Any]: """Convert this class to a dictionary, which can later be serialized as JSON.""" return { "schemaVersion": USAGES_SCHEMA_VERSION, diff --git a/src/library_analyzer/utils/__init__.py b/src/library_analyzer/utils/__init__.py index 29f751c9..32c2d90f 100644 --- a/src/library_analyzer/utils/__init__.py +++ b/src/library_analyzer/utils/__init__.py @@ -2,6 +2,7 @@ from ._ast_walker import ASTWalker from ._files import ensure_file_exists, initialize_and_read_exclude_file, list_files +from ._load_language import load_language from ._names import declaration_qname_to_name, parent_id, parent_qualified_name from ._parsing import parse_python_code from ._strings import pluralize @@ -12,6 +13,7 @@ "ensure_file_exists", "initialize_and_read_exclude_file", "list_files", + "load_language", "parse_python_code", "parent_id", "parent_qualified_name", diff --git a/src/library_analyzer/utils/_load_language.py b/src/library_analyzer/utils/_load_language.py new file mode 100644 index 00000000..510467af --- /dev/null +++ b/src/library_analyzer/utils/_load_language.py @@ -0,0 +1,22 @@ +import spacy + + +def load_language(name: str) -> spacy.Language: + """ + Safely load a Spacy language model. 
+ + Parameters + ---------- + name: str + The name of the language model to load. + + Returns + ------- + spacy.Language + The loaded language model. + """ + try: + return spacy.load(name) + except OSError: + spacy.cli.download(name) + return spacy.load(name) diff --git a/src/refined_types/__init__.py b/src/refined_types/__init__.py deleted file mode 100644 index 88e21a6f..00000000 --- a/src/refined_types/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""A prototype to infer refined types like literal types or bounded numbers.""" diff --git a/src/refined_types/_main.py b/src/refined_types/_main.py deleted file mode 100644 index d236ece3..00000000 --- a/src/refined_types/_main.py +++ /dev/null @@ -1,67 +0,0 @@ -""" -Iterate over the `json` files inside the `sklearn` folder and concatenate them to form `ground_truth.json`. - -Usage: ------- -python sync.py -""" - -import glob -import json -from pathlib import Path - - -def sync() -> None: - ground_truth = {} - for filepath in glob.glob("sklearn/**/*json", recursive=True): - with Path(filepath).open() as fin: - json_file = json.load(fin) - ground_truth.update(json_file) - - with Path("ground_truth.json").open("w") as fout: - json.dump(ground_truth, fout, indent=4) - - -def get_class_name(filepath: str) -> str: - filepath = filepath.replace("/", ".") - filepath = filepath.removesuffix(".json") - return filepath - - -def get_ground_truth() -> dict: - with Path("ground_truth.json").open() as fin: - return json.load(fin) - - -def get_boundaries() -> None: - ground_truth = get_ground_truth() - boundaries = {} - for cls, props in ground_truth.items(): - for prop_name, prop_body in props.items(): - ref_type = prop_body["refined_type"] - if ref_type["kind"] == "BoundaryType": - key = f"{cls}.{prop_name}" - boundaries[key] = prop_body - if ref_type["kind"] == "UnionType": - for type_ in ref_type["types"]: - if type_["kind"] == "BoundaryType": - key = f"{cls}.{prop_name}" - boundaries[key] = prop_body - with Path("boundaries.json").open("w") as fout: - json.dump(boundaries, fout, indent=4) - - -def stats() -> None: - with Path("ground_truth.json").open() as fin: - ground_truth = json.load(fin) - - num_classes = len(ground_truth) - num_props = sum(len(ground_truth[class_name]) for class_name in ground_truth) - - print(f"Number of classes: {num_classes}") # noqa: T201 - print(f"Number of properties with refined types: {num_props}") # noqa: T201 - - -if __name__ == "__main__": - sync() - get_boundaries() diff --git a/src/refined_types/sklearn/cluster/KMeans.json b/src/refined_types/sklearn/cluster/KMeans.json deleted file mode 100644 index 241f26dc..00000000 --- a/src/refined_types/sklearn/cluster/KMeans.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "sklearn.cluster.KMeans": { - "init": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "NamedType", - "name": "callable" - }, - { - "kind": "NamedType", - "name": "array-like" - }, - { - "kind": "EnumType", - "values": ["k-means++", "random"] - } - ] - }, - "docstring": { - "type": "{'k-means++', 'random'}, callable or array-like of shape (n_clusters, n_features), default='k-means++'", - "description": "Method for initialization:\n\n'k-means++' : selects initial cluster centers for k-mean\nclustering in a smart way to speed up convergence. 
See section\nNotes in k_init for more details.\n\n'random': choose `n_clusters` observations (rows) at random from data\nfor the initial centroids.\n\nIf an array is passed, it should be of shape (n_clusters, n_features)\nand gives the initial centers.\n\nIf a callable is passed, it should take arguments X, n_clusters and a\nrandom state and return an initialization." - } - }, - "algorithm": { - "refined_type": { - "kind": "EnumType", - "values": ["full", "elkan", "auto"] - }, - "docstring": { - "type": "{'auto', 'full', 'elkan'}, default='auto'", - "description": "K-means algorithm to use. The classical EM-style algorithm is \"full\". The \"elkan\" variation is more efficient on data with well-defined clusters, by using the triangle inequality. However it’s more memory intensive due to the allocation of an extra array of shape (n_samples, n_clusters).\n\nFor now \"auto\" (kept for backward compatibility) chooses \"elkan\" but it might change in the future for a better heuristic.\n\n" - } - } - } -} diff --git a/src/refined_types/sklearn/decomposition/PCA.json b/src/refined_types/sklearn/decomposition/PCA.json deleted file mode 100644 index 60b93a3a..00000000 --- a/src/refined_types/sklearn/decomposition/PCA.json +++ /dev/null @@ -1,74 +0,0 @@ -{ - "sklearn.decomposition.PCA": { - "n_components": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "NamedType", - "name": "int" - }, - { - "kind": "NamedType", - "name": "float" - }, - { - "kind": "EnumType", - "values": ["mle"] - } - ] - }, - "docstring": { - "type": "int, float or 'mle', default=None", - "description": "Number of components to keep.\nif n_components is not set all components are kept::\n\n n_components == min(n_samples, n_features)\n\nIf ``n_components == 'mle'`` and ``svd_solver == 'full'``, Minka's\nMLE is used to guess the dimension. Use of ``n_components == 'mle'``\nwill interpret ``svd_solver == 'auto'`` as ``svd_solver == 'full'``.\n\nIf ``0 < n_components < 1`` and ``svd_solver == 'full'``, select the\nnumber of components such that the amount of variance that needs to be\nexplained is greater than the percentage specified by n_components.\n\nIf ``svd_solver == 'arpack'``, the number of components must be\nstrictly less than the minimum of n_features and n_samples.\n\nHence, the None case results in::\n\n n_components == min(n_samples, n_features) - 1" - } - }, - "svd_solver": { - "refined_type": { - "kind": "EnumType", - "values": ["full", "arpack", "randomized", "auto"] - }, - "docstring": { - "type": "{'auto', 'full', 'arpack', 'randomized'}, default='auto'", - "description": "If auto :\n The solver is selected by a default policy based on `X.shape` and\n `n_components`: if the input data is larger than 500x500 and the\n number of components to extract is lower than 80% of the smallest\n dimension of the data, then the more efficient 'randomized'\n method is enabled. Otherwise the exact full SVD is computed and\n optionally truncated afterwards.\nIf full :\n run exact full SVD calling the standard LAPACK solver via\n `scipy.linalg.svd` and select the components by postprocessing\nIf arpack :\n run SVD truncated to n_components calling ARPACK solver via\n `scipy.sparse.linalg.svds`. It requires strictly\n 0 < n_components < min(X.shape)\nIf randomized :\n run randomized SVD by the method of Halko et al.\n\n.. 
versionadded:: 0.18.0" - } - }, - "tol": { - "refined_type": { - "kind": "BoundaryType", - "baseType": "float", - "min": 0, - "minInclusive": true, - "max": null, - "maxInclusive": false - }, - "docstring": { - "type": "float, default=0.0", - "description": "Tolerance for singular values computed by svd_solver == 'arpack'.\nMust be of range [0.0, infinity).\n\n.. versionadded:: 0.18.0" - } - }, - "iterated_power": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "BoundaryType", - "baseType": "int", - "min": 0, - "minInclusive": true, - "max": null, - "maxInclusive": false - }, - { - "kind": "EnumType", - "values": ["auto"] - } - ] - }, - "docstring": { - "type": "int or 'auto', default='auto'", - "description": "Number of iterations for the power method computed by\nsvd_solver == 'randomized'.\nMust be of range [0, infinity).\n\n.. versionadded:: 0.18.0" - } - } - } -} diff --git a/src/refined_types/sklearn/decomposition/TruncatedSVD.json b/src/refined_types/sklearn/decomposition/TruncatedSVD.json deleted file mode 100644 index b0226524..00000000 --- a/src/refined_types/sklearn/decomposition/TruncatedSVD.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "sklearn.decomposition.TruncatedSVD": { - "algorithm": { - "refined_type": { - "kind": "EnumType", - "values": ["arpack", "randomized"] - }, - "docstring": { - "type": "{'arpack', 'randomized'}, default='randomized'", - "description": "SVD solver to use. Either \"arpack\" for the ARPACK wrapper in SciPy\n(scipy.sparse.linalg.svds), or \"randomized\" for the randomized\nalgorithm due to Halko (2009)." - } - } - } -} diff --git a/src/refined_types/sklearn/ensemble/AdaBoostClassifier.json b/src/refined_types/sklearn/ensemble/AdaBoostClassifier.json deleted file mode 100644 index bcaa3391..00000000 --- a/src/refined_types/sklearn/ensemble/AdaBoostClassifier.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "sklearn.ensemble.AdaBoostClassifier": { - "algorithm": { - "refined_type": { - "kind": "EnumType", - "values": ["SAMME", "SAMME.R"] - }, - "docstring": { - "type": "{'SAMME', 'SAMME.R'}, default='SAMME.R'", - "description": "If 'SAMME.R' then use the SAMME.R real boosting algorithm.\n``base_estimator`` must support calculation of class probabilities.\nIf 'SAMME' then use the SAMME discrete boosting algorithm.\nThe SAMME.R algorithm typically converges faster than SAMME,\nachieving a lower test error with fewer boosting iterations." - } - } - } -} diff --git a/src/refined_types/sklearn/ensemble/ExtraTreesClassifier.json b/src/refined_types/sklearn/ensemble/ExtraTreesClassifier.json deleted file mode 100644 index 2f4e0daa..00000000 --- a/src/refined_types/sklearn/ensemble/ExtraTreesClassifier.json +++ /dev/null @@ -1,97 +0,0 @@ -{ - "sklearn.ensemble.ExtraTreesClassifier": { - "criterion": { - "refined_type": { - "kind": "EnumType", - "values": ["gini", "entropy"] - }, - "docstring": { - "type": "{\"gini\", \"entropy\"}, default=\"gini\"", - "description": "The function to measure the quality of a split. Supported criteria are\n\"gini\" for the Gini impurity and \"entropy\" for the information gain." 
- } - }, - "max_features": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "EnumType", - "values": ["sqrt", "auto", "log2"] - }, - { - "kind": "NamedType", - "name": "int" - }, - { - "kind": "NamedType", - "name": "float" - } - ] - }, - "docstring": { - "type": "{\"auto\", \"sqrt\", \"log2\"}, int or float, default=\"auto\"", - "description": "The number of features to consider when looking for the best split:\n\n- If int, then consider `max_features` features at each split.\n- If float, then `max_features` is a fraction and\n `round(max_features * n_features)` features are considered at each\n split.\n- If \"auto\", then `max_features=sqrt(n_features)`.\n- If \"sqrt\", then `max_features=sqrt(n_features)`.\n- If \"log2\", then `max_features=log2(n_features)`.\n- If None, then `max_features=n_features`.\n\nNote: the search for a split does not stop until at least one\nvalid partition of the node samples is found, even if it requires to\neffectively inspect more than ``max_features`` features." - } - }, - "class_weight": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "EnumType", - "values": ["balanced", "balanced_subsample"] - }, - { - "kind": "NamedType", - "name": "dict" - }, - { - "kind": "NamedType", - "name": "list of dicts" - } - ] - }, - "docstring": { - "type": "{\"balanced\", \"balanced_subsample\"}, dict or list of dicts, default=None", - "description": "Weights associated with classes in the form ``{class_label: weight}``.\nIf not given, all classes are supposed to have weight one. For\nmulti-output problems, a list of dicts can be provided in the same\norder as the columns of y.\n\nNote that for multioutput (including multilabel) weights should be\ndefined for each class of every column in its own dict. For example,\nfor four-class multilabel classification weights should be\n[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of\n[{1:1}, {2:5}, {3:1}, {4:1}].\n\nThe \"balanced\" mode uses the values of y to automatically adjust\nweights inversely proportional to class frequencies in the input data\nas ``n_samples / (n_classes * np.bincount(y))``\n\nThe \"balanced_subsample\" mode is the same as \"balanced\" except that\nweights are computed based on the bootstrap sample for every tree\ngrown.\n\nFor multi-output, the weights of each column of y will be multiplied.\n\nNote that these weights will be multiplied with sample_weight (passed\nthrough the fit method) if sample_weight is specified." - } - }, - "ccp_alpha": { - "refined_type": { - "kind": "BoundaryType", - "baseType": "float", - "min": 0, - "minInclusive": true, - "max": null, - "maxInclusive": false - }, - "docstring": { - "type": "non-negative float, default=0.0", - "description": "Complexity parameter used for Minimal Cost-Complexity Pruning. The\nsubtree with the largest cost complexity that is smaller than\n``ccp_alpha`` will be chosen. By default, no pruning is performed. See\n:ref:`minimal_cost_complexity_pruning` for details.\n\n.. 
versionadded:: 0.22" - } - }, - "max_samples": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "NamedType", - "name": "int" - }, - { - "kind": "BoundaryType", - "baseType": "float", - "min": 0, - "minInclusive": false, - "max": 1, - "maxInclusive": true - } - ] - }, - "docstring": { - "type": "int or float, default=None", - "description": "If bootstrap is True, the number of samples to draw from X\nto train each base estimator.\n\n- If None (default), then draw `X.shape[0]` samples.\n- If int, then draw `max_samples` samples.\n- If float, then draw `max_samples * X.shape[0]` samples. Thus,\n `max_samples` should be in the interval `(0.0, 1.0]`.\n\n.. versionadded:: 0.22" - } - } - } -} diff --git a/src/refined_types/sklearn/ensemble/GradientBoostingRegressor.json b/src/refined_types/sklearn/ensemble/GradientBoostingRegressor.json deleted file mode 100644 index c01c20e5..00000000 --- a/src/refined_types/sklearn/ensemble/GradientBoostingRegressor.json +++ /dev/null @@ -1,75 +0,0 @@ -{ - "sklearn.ensemble.GradientBoostingRegressor": { - "loss": { - "refined_type": { - "kind": "EnumType", - "values": ["quantile", "huber", "absolute_error", "squared_error"] - }, - "docstring": { - "type": "{'squared_error', 'absolute_error', 'huber', 'quantile'}, default='squared_error'", - "description": "Loss function to be optimized. 'squared_error' refers to the squared\nerror for regression. 'absolute_error' refers to the absolute error of\nregression and is a robust loss function. 'huber' is a\ncombination of the two. 'quantile' allows quantile regression (use\n`alpha` to specify the quantile).\n\n.. deprecated:: 1.0\n The loss 'ls' was deprecated in v1.0 and will be removed in\n version 1.2. Use `loss='squared_error'` which is equivalent.\n\n.. deprecated:: 1.0\n The loss 'lad' was deprecated in v1.0 and will be removed in\n version 1.2. Use `loss='absolute_error'` which is equivalent." - } - }, - "criterion": { - "refined_type": { - "kind": "EnumType", - "values": ["squared_error", "mse", "friedman_mse", "mae"] - }, - "docstring": { - "type": "{'friedman_mse', 'squared_error', 'mse', 'mae'}, default='friedman_mse'", - "description": "The function to measure the quality of a split. Supported criteria\nare \"friedman_mse\" for the mean squared error with improvement\nscore by Friedman, \"squared_error\" for mean squared error, and \"mae\"\nfor the mean absolute error. The default value of \"friedman_mse\" is\ngenerally the best as it can provide a better approximation in some\ncases.\n\n.. versionadded:: 0.18\n\n.. deprecated:: 0.24\n `criterion='mae'` is deprecated and will be removed in version\n 1.1 (renaming of 0.26). The correct way of minimizing the absolute\n error is to use `loss='absolute_error'` instead.\n\n.. deprecated:: 1.0\n Criterion 'mse' was deprecated in v1.0 and will be removed in\n version 1.2. Use `criterion='squared_error'` which is equivalent." 
- } - }, - "max_features": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "EnumType", - "values": ["sqrt", "auto", "log2"] - }, - { - "kind": "NamedType", - "name": "float" - }, - { - "kind": "NamedType", - "name": "int" - } - ] - }, - "docstring": { - "type": "{'auto', 'sqrt', 'log2'}, int or float, default=None", - "description": "The number of features to consider when looking for the best split:\n\n- If int, then consider `max_features` features at each split.\n- If float, then `max_features` is a fraction and\n `int(max_features * n_features)` features are considered at each\n split.\n- If \"auto\", then `max_features=n_features`.\n- If \"sqrt\", then `max_features=sqrt(n_features)`.\n- If \"log2\", then `max_features=log2(n_features)`.\n- If None, then `max_features=n_features`.\n\nChoosing `max_features < n_features` leads to a reduction of variance\nand an increase in bias.\n\nNote: the search for a split does not stop until at least one\nvalid partition of the node samples is found, even if it requires to\neffectively inspect more than ``max_features`` features." - } - }, - "validation_fraction": { - "refined_type": { - "kind": "BoundaryType", - "baseType": "float", - "min": 0, - "minInclusive": false, - "max": 1, - "maxInclusive": false - }, - "docstring": { - "type": "float, default=0.1", - "description": "The proportion of training data to set aside as validation set for\nearly stopping. Must be between 0 and 1.\nOnly used if ``n_iter_no_change`` is set to an integer.\n\n.. versionadded:: 0.20" - } - }, - "ccp_alpha": { - "refined_type": { - "kind": "BoundaryType", - "baseType": "float", - "min": 0, - "minInclusive": true, - "max": null, - "maxInclusive": false - }, - "docstring": { - "type": "non-negative float, default=0.0", - "description": "Complexity parameter used for Minimal Cost-Complexity Pruning. The\nsubtree with the largest cost complexity that is smaller than\n``ccp_alpha`` will be chosen. By default, no pruning is performed. See\n:ref:`minimal_cost_complexity_pruning` for details.\n\n.. versionadded:: 0.22" - } - } - } -} diff --git a/src/refined_types/sklearn/ensemble/RandomForestClassifier.json b/src/refined_types/sklearn/ensemble/RandomForestClassifier.json deleted file mode 100644 index 20b12dda..00000000 --- a/src/refined_types/sklearn/ensemble/RandomForestClassifier.json +++ /dev/null @@ -1,97 +0,0 @@ -{ - "sklearn.ensemble.RandomForestClassifier": { - "criterion": { - "refined_type": { - "kind": "EnumType", - "values": ["gini", "entropy"] - }, - "docstring": { - "type": "{\"gini\", \"entropy\"}, default=\"gini\"", - "description": "The function to measure the quality of a split. Supported criteria are\n\"gini\" for the Gini impurity and \"entropy\" for the information gain.\nNote: this parameter is tree-specific." 
- } - }, - "max_features": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "EnumType", - "values": ["sqrt", "auto", "log2"] - }, - { - "kind": "NamedType", - "name": "int" - }, - { - "kind": "NamedType", - "name": "float" - } - ] - }, - "docstring": { - "type": "{\"auto\", \"sqrt\", \"log2\"}, int or float, default=\"auto\"", - "description": "The number of features to consider when looking for the best split:\n\n- If int, then consider `max_features` features at each split.\n- If float, then `max_features` is a fraction and\n `round(max_features * n_features)` features are considered at each\n split.\n- If \"auto\", then `max_features=sqrt(n_features)`.\n- If \"sqrt\", then `max_features=sqrt(n_features)` (same as \"auto\").\n- If \"log2\", then `max_features=log2(n_features)`.\n- If None, then `max_features=n_features`.\n\nNote: the search for a split does not stop until at least one\nvalid partition of the node samples is found, even if it requires to\neffectively inspect more than ``max_features`` features." - } - }, - "class_weight": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "EnumType", - "values": ["balanced", "balanced_subsample"] - }, - { - "kind": "NamedType", - "name": "dict" - }, - { - "kind": "NamedType", - "name": "list of dicts" - } - ] - }, - "docstring": { - "type": "{\"balanced\", \"balanced_subsample\"}, dict or list of dicts, default=None", - "description": "Weights associated with classes in the form ``{class_label: weight}``.\nIf not given, all classes are supposed to have weight one. For\nmulti-output problems, a list of dicts can be provided in the same\norder as the columns of y.\n\nNote that for multioutput (including multilabel) weights should be\ndefined for each class of every column in its own dict. For example,\nfor four-class multilabel classification weights should be\n[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of\n[{1:1}, {2:5}, {3:1}, {4:1}].\n\nThe \"balanced\" mode uses the values of y to automatically adjust\nweights inversely proportional to class frequencies in the input data\nas ``n_samples / (n_classes * np.bincount(y))``\n\nThe \"balanced_subsample\" mode is the same as \"balanced\" except that\nweights are computed based on the bootstrap sample for every tree\ngrown.\n\nFor multi-output, the weights of each column of y will be multiplied.\n\nNote that these weights will be multiplied with sample_weight (passed\nthrough the fit method) if sample_weight is specified." - } - }, - "ccp_alpha": { - "refined_type": { - "kind": "BoundaryType", - "baseType": "float", - "min": 0, - "minInclusive": true, - "max": null, - "maxInclusive": false - }, - "docstring": { - "type": "non-negative float, default=0.0", - "description": "Complexity parameter used for Minimal Cost-Complexity Pruning. The\nsubtree with the largest cost complexity that is smaller than\n``ccp_alpha`` will be chosen. By default, no pruning is performed. See\n:ref:`minimal_cost_complexity_pruning` for details.\n\n.. 
versionadded:: 0.22" - } - }, - "max_samples": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "NamedType", - "name": "int" - }, - { - "kind": "BoundaryType", - "baseType": "float", - "min": 0, - "minInclusive": false, - "max": 1, - "maxInclusive": true - } - ] - }, - "docstring": { - "type": "int or float, default=None", - "description": "If bootstrap is True, the number of samples to draw from X\nto train each base estimator.\n\n- If None (default), then draw `X.shape[0]` samples.\n- If int, then draw `max_samples` samples.\n- If float, then draw `max_samples * X.shape[0]` samples. Thus,\n `max_samples` should be in the interval `(0.0, 1.0]`.\n\n.. versionadded:: 0.22" - } - } - } -} diff --git a/src/refined_types/sklearn/feature_extraction/text/CountVectorizer.json b/src/refined_types/sklearn/feature_extraction/text/CountVectorizer.json deleted file mode 100644 index 8a1c50e9..00000000 --- a/src/refined_types/sklearn/feature_extraction/text/CountVectorizer.json +++ /dev/null @@ -1,118 +0,0 @@ -{ - "sklearn.feature_extraction.text.CountVectorizer": { - "input": { - "refined_type": { - "kind": "EnumType", - "values": ["file", "filename", "content"] - }, - "docstring": { - "type": "{'filename', 'file', 'content'}, default='content'", - "description": "- If `'filename'`, the sequence passed as an argument to fit is\n expected to be a list of filenames that need reading to fetch\n the raw content to analyze.\n\n- If `'file'`, the sequence items must have a 'read' method (file-like\n object) that is called to fetch the bytes in memory.\n\n- If `'content'`, the input is expected to be a sequence of items that\n can be of type string or byte." - } - }, - "decode_error": { - "refined_type": { - "kind": "EnumType", - "values": ["ignore", "replace", "strict"] - }, - "docstring": { - "type": "{'strict', 'ignore', 'replace'}, default='strict'", - "description": "Instruction on what to do if a byte sequence is given to analyze that\ncontains characters not of the given `encoding`. By default, it is\n'strict', meaning that a UnicodeDecodeError will be raised. Other\nvalues are 'ignore' and 'replace'." - } - }, - "strip_accents": { - "refined_type": { - "kind": "EnumType", - "values": ["ascii", "unicode"] - }, - "docstring": { - "type": "{'ascii', 'unicode'}, default=None", - "description": "Remove accents and perform other character normalization\nduring the preprocessing step.\n'ascii' is a fast method that only works on characters that have\nan direct ASCII mapping.\n'unicode' is a slightly slower method that works on any characters.\nNone (default) does nothing.\n\nBoth 'ascii' and 'unicode' use NFKD normalization from\n:func:`unicodedata.normalize`." - } - }, - "stop_words": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "EnumType", - "values": ["english"] - }, - { - "kind": "NamedType", - "name": "list" - } - ] - }, - "docstring": { - "type": "{'english'}, list, default=None", - "description": "If 'english', a built-in stop word list for English is used.\nThere are several known issues with 'english' and you should\nconsider an alternative (see :ref:`stop_words`).\n\nIf a list, that list is assumed to contain stop words, all of which\nwill be removed from the resulting tokens.\nOnly applies if ``analyzer == 'word'``.\n\nIf None, no stop words will be used. max_df can be set to a value\nin the range [0.7, 1.0) to automatically detect and filter stop\nwords based on intra corpus document frequency of terms." 
- } - }, - "analyzer": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "EnumType", - "values": ["char_wb", "char", "word"] - }, - { - "kind": "NamedType", - "name": "callable" - } - ] - }, - "docstring": { - "type": "{'word', 'char', 'char_wb'} or callable, default='word'", - "description": "Whether the feature should be made of word n-gram or character\nn-grams.\nOption 'char_wb' creates character n-grams only from text inside\nword boundaries; n-grams at the edges of words are padded with space.\n\nIf a callable is passed it is used to extract the sequence of features\nout of the raw, unprocessed input.\n\n.. versionchanged:: 0.21\n\nSince v0.21, if ``input`` is ``filename`` or ``file``, the data is\nfirst read from the file and then passed to the given callable\nanalyzer." - } - }, - "max_df": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "BoundaryType", - "baseType": "float", - "min": 0, - "minInclusive": true, - "max": 1, - "maxInclusive": true - }, - { - "kind": "NamedType", - "name": "int" - } - ] - }, - "docstring": { - "type": "float in range [0.0, 1.0] or int, default=1.0", - "description": "When building the vocabulary ignore terms that have a document\nfrequency strictly higher than the given threshold (corpus-specific\nstop words).\nIf float, the parameter represents a proportion of documents, integer\nabsolute counts.\nThis parameter is ignored if vocabulary is not None." - } - }, - "min_df": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "BoundaryType", - "baseType": "float", - "min": 0, - "minInclusive": true, - "max": 1, - "maxInclusive": true - }, - { - "kind": "NamedType", - "name": "int" - } - ] - }, - "docstring": { - "type": "float in range [0.0, 1.0] or int, default=1", - "description": "When building the vocabulary ignore terms that have a document\nfrequency strictly lower than the given threshold. This value is also\ncalled cut-off in the literature.\nIf float, the parameter represents a proportion of documents, integer\nabsolute counts.\nThis parameter is ignored if vocabulary is not None." - } - } - } -} diff --git a/src/refined_types/sklearn/feature_extraction/text/TfidfVectorizer.json b/src/refined_types/sklearn/feature_extraction/text/TfidfVectorizer.json deleted file mode 100644 index ff45b727..00000000 --- a/src/refined_types/sklearn/feature_extraction/text/TfidfVectorizer.json +++ /dev/null @@ -1,128 +0,0 @@ -{ - "sklearn.feature_extraction.text.TfidfVectorizer": { - "input": { - "refined_type": { - "kind": "EnumType", - "values": ["file", "filename", "content"] - }, - "docstring": { - "type": "{'filename', 'file', 'content'}, default='content'", - "description": "- If `'filename'`, the sequence passed as an argument to fit is\n expected to be a list of filenames that need reading to fetch\n the raw content to analyze.\n\n- If `'file'`, the sequence items must have a 'read' method (file-like\n object) that is called to fetch the bytes in memory.\n\n- If `'content'`, the input is expected to be a sequence of items that\n can be of type string or byte." - } - }, - "decode_error": { - "refined_type": { - "kind": "EnumType", - "values": ["ignore", "replace", "strict"] - }, - "docstring": { - "type": "{'strict', 'ignore', 'replace'}, default='strict'", - "description": "Instruction on what to do if a byte sequence is given to analyze that\ncontains characters not of the given `encoding`. By default, it is\n'strict', meaning that a UnicodeDecodeError will be raised. 
Other\nvalues are 'ignore' and 'replace'." - } - }, - "strip_accents": { - "refined_type": { - "kind": "EnumType", - "values": ["ascii", "unicode"] - }, - "docstring": { - "type": "{'ascii', 'unicode'}, default=None", - "description": "Remove accents and perform other character normalization\nduring the preprocessing step.\n'ascii' is a fast method that only works on characters that have\nan direct ASCII mapping.\n'unicode' is a slightly slower method that works on any characters.\nNone (default) does nothing.\n\nBoth 'ascii' and 'unicode' use NFKD normalization from\n:func:`unicodedata.normalize`." - } - }, - "analyzer": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "EnumType", - "values": ["char_wb", "char", "word"] - }, - { - "kind": "NamedType", - "name": "callable" - } - ] - }, - "docstring": { - "type": "{'word', 'char', 'char_wb'} or callable, default='word'", - "description": "Whether the feature should be made of word or character n-grams.\nOption 'char_wb' creates character n-grams only from text inside\nword boundaries; n-grams at the edges of words are padded with space.\n\nIf a callable is passed it is used to extract the sequence of features\nout of the raw, unprocessed input.\n\n.. versionchanged:: 0.21\n Since v0.21, if ``input`` is ``'filename'`` or ``'file'``, the data\n is first read from the file and then passed to the given callable\n analyzer." - } - }, - "stop_words": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "EnumType", - "values": ["english"] - }, - { - "kind": "NamedType", - "name": "list" - } - ] - }, - "docstring": { - "type": "{'english'}, list, default=None", - "description": "If a string, it is passed to _check_stop_list and the appropriate stop\nlist is returned. 'english' is currently the only supported string\nvalue.\nThere are several known issues with 'english' and you should\nconsider an alternative (see :ref:`stop_words`).\n\nIf a list, that list is assumed to contain stop words, all of which\nwill be removed from the resulting tokens.\nOnly applies if ``analyzer == 'word'``.\n\nIf None, no stop words will be used. max_df can be set to a value\nin the range [0.7, 1.0) to automatically detect and filter stop\nwords based on intra corpus document frequency of terms." - } - }, - "max_df": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "BoundaryType", - "baseType": "float", - "min": 0, - "minInclusive": true, - "max": 1, - "maxInclusive": true - }, - { - "kind": "NamedType", - "name": "int" - } - ] - }, - "docstring": { - "type": "float or int, default=1.0", - "description": "When building the vocabulary ignore terms that have a document\nfrequency strictly higher than the given threshold (corpus-specific\nstop words).\nIf float in range [0.0, 1.0], the parameter represents a proportion of\ndocuments, integer absolute counts.\nThis parameter is ignored if vocabulary is not None." - } - }, - "min_df": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "BoundaryType", - "baseType": "float", - "min": 0, - "minInclusive": true, - "max": 1, - "maxInclusive": true - }, - { - "kind": "NamedType", - "name": "int" - } - ] - }, - "docstring": { - "type": "float or int, default=1", - "description": "When building the vocabulary ignore terms that have a document\nfrequency strictly lower than the given threshold. 
This value is also\ncalled cut-off in the literature.\nIf float in range of [0.0, 1.0], the parameter represents a proportion\nof documents, integer absolute counts.\nThis parameter is ignored if vocabulary is not None." - } - }, - "norm": { - "refined_type": { - "kind": "EnumType", - "values": ["l1", "l2"] - }, - "docstring": { - "type": "{'l1', 'l2'}, default='l2'", - "description": "Each output row will have unit norm, either:\n\n- 'l2': Sum of squares of vector elements is 1. The cosine\n similarity between two vectors is their dot product when l2 norm has\n been applied.\n- 'l1': Sum of absolute values of vector elements is 1.\n See :func:`preprocessing.normalize`." - } - } - } -} diff --git a/src/refined_types/sklearn/impute/SimpleImputer.json b/src/refined_types/sklearn/impute/SimpleImputer.json deleted file mode 100644 index e3fd73f8..00000000 --- a/src/refined_types/sklearn/impute/SimpleImputer.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "sklearn.impute.SimpleImputer": { - "strategy": { - "refined_type": { - "kind": "EnumType", - "values": ["mean", "median", "most_frequent", "constant"] - }, - "docstring": { - "type": "str, default='mean'", - "description": "The imputation strategy.\n\n- If \"mean\", then replace missing values using the mean along\n each column. Can only be used with numeric data.\n- If \"median\", then replace missing values using the median along\n each column. Can only be used with numeric data.\n- If \"most_frequent\", then replace missing using the most frequent\n value along each column. Can be used with strings or numeric data.\n If there is more than one such value, only the smallest is returned.\n- If \"constant\", then replace missing values with fill_value. Can be\n used with strings or numeric data.\n\n.. versionadded:: 0.20\n strategy=\"constant\" for fixed value imputation." - } - } - } -} diff --git a/src/refined_types/sklearn/linear_model/ElasticNet.json b/src/refined_types/sklearn/linear_model/ElasticNet.json deleted file mode 100644 index b535e827..00000000 --- a/src/refined_types/sklearn/linear_model/ElasticNet.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "sklearn.linear_model.ElasticNet": { - "l1_ratio": { - "refined_type": { - "kind": "BoundaryType", - "baseType": "float", - "min": 0, - "minInclusive": true, - "max": 1, - "maxInclusive": true - }, - "docstring": { - "type": "float, default=0.5", - "description": "The ElasticNet mixing parameter, with 0 <= l1_ratio <= 1. For l1_ratio = 0 the penalty is an L2 penalty. For l1_ratio = 1 it is an L1 penalty. For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.\n\n" - } - }, - "selection": { - "refined_type": { - "kind": "EnumType", - "values": ["cyclic", "random"] - }, - "docstring": { - "type": "{'cyclic', 'random'}, default='cyclic'", - "description": "If set to 'random', a random coefficient is updated every iteration\nrather than looping over features sequentially by default. This\n(setting to 'random') often leads to significantly faster convergence\nespecially when tol is higher than 1e-4." 
- } - } - } -} diff --git a/src/refined_types/sklearn/linear_model/Lasso.json b/src/refined_types/sklearn/linear_model/Lasso.json deleted file mode 100644 index 655c5f4e..00000000 --- a/src/refined_types/sklearn/linear_model/Lasso.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "sklearn.linear_model.Lasso": { - "selection": { - "refined_type": { - "kind": "EnumType", - "values": ["cyclic", "random"] - }, - "docstring": { - "type": "{'cyclic', 'random'}, default='cyclic'", - "description": "If set to 'random', a random coefficient is updated every iteration\nrather than looping over features sequentially by default. This\n(setting to 'random') often leads to significantly faster convergence\nespecially when tol is higher than 1e-4." - } - } - } -} diff --git a/src/refined_types/sklearn/linear_model/LinearRegression.json b/src/refined_types/sklearn/linear_model/LinearRegression.json deleted file mode 100644 index 07772a8a..00000000 --- a/src/refined_types/sklearn/linear_model/LinearRegression.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "sklearn.linear_model.LinearRegression": {} -} diff --git a/src/refined_types/sklearn/linear_model/LogisticRegression.json b/src/refined_types/sklearn/linear_model/LogisticRegression.json deleted file mode 100644 index d1858e57..00000000 --- a/src/refined_types/sklearn/linear_model/LogisticRegression.json +++ /dev/null @@ -1,95 +0,0 @@ -{ - "sklearn.linear_model.LogisticRegression": { - "penalty": { - "refined_type": { - "kind": "EnumType", - "values": ["none", "elasticnet", "l1", "l2"] - }, - "docstring": { - "type": "{'l1', 'l2', 'elasticnet', 'none'}, default='l2'", - "description": "Specify the norm of the penalty:\n\n- `'none'`: no penalty is added;\n- `'l2'`: add a L2 penalty term and it is the default choice;\n- `'l1'`: add a L1 penalty term;\n- `'elasticnet'`: both L1 and L2 penalty terms are added.\n\n.. warning::\n Some penalties may not work with some solvers. See the parameter\n `solver` below, to know the compatibility between the penalty and\n solver.\n\n.. versionadded:: 0.19\n l1 penalty with SAGA solver (allowing 'multinomial' + L1)" - } - }, - "C": { - "refined_type": { - "kind": "BoundaryType", - "baseType": "float", - "min": 0, - "minInclusive": false, - "max": null, - "maxInclusive": false - }, - "docstring": { - "type": "float, default=1.0", - "description": "Inverse of regularization strength; must be a positive float.\nLike in support vector machines, smaller values specify stronger\nregularization." - } - }, - "class_weight": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "NamedType", - "name": "dict" - }, - { - "kind": "EnumType", - "values": ["balanced"] - } - ] - }, - "docstring": { - "type": "dict or 'balanced', default=None", - "description": "Weights associated with classes in the form {class_label: weight}. If not given, all classes are supposed to have weight one.\n\nThe “balanced” mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as n_samples / (n_classes * np.bincount(y)).\n\nNote that these weights will be multiplied with sample_weight (passed through the fit method) if sample_weight is specified." - } - }, - "solver": { - "refined_type": { - "kind": "EnumType", - "values": ["lbfgs", "newton-cg", "liblinear", "sag", "saga"] - }, - "docstring": { - "type": "{'newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'}, default='lbfgs'", - "description": "Algorithm to use in the optimization problem. 
Default is 'lbfgs'.\nTo choose a solver, you might want to consider the following aspects:\n\n - For small datasets, 'liblinear' is a good choice, whereas 'sag'\n and 'saga' are faster for large ones;\n - For multiclass problems, only 'newton-cg', 'sag', 'saga' and\n 'lbfgs' handle multinomial loss;\n - 'liblinear' is limited to one-versus-rest schemes.\n\n.. warning::\n The choice of the algorithm depends on the penalty chosen:\n Supported penalties by solver:\n\n - 'newton-cg' - ['l2', 'none']\n - 'lbfgs' - ['l2', 'none']\n - 'liblinear' - ['l1', 'l2']\n - 'sag' - ['l2', 'none']\n - 'saga' - ['elasticnet', 'l1', 'l2', 'none']\n\n.. note::\n 'sag' and 'saga' fast convergence is only guaranteed on\n features with approximately the same scale. You can\n preprocess the data with a scaler from :mod:`sklearn.preprocessing`.\n\n.. seealso::\n Refer to the User Guide for more information regarding\n :class:`LogisticRegression` and more specifically the\n `Table `_\n summarizing solver/penalty supports.\n \n\n.. versionadded:: 0.17\n Stochastic Average Gradient descent solver.\n.. versionadded:: 0.19\n SAGA solver.\n.. versionchanged:: 0.22\n The default solver changed from 'liblinear' to 'lbfgs' in 0.22." - } - }, - "multi_class": { - "refined_type": { - "kind": "EnumType", - "values": ["multinomial", "ovr", "auto"] - }, - "docstring": { - "type": "{'auto', 'ovr', 'multinomial'}, default='auto'", - "description": "If the option chosen is 'ovr', then a binary problem is fit for each\nlabel. For 'multinomial' the loss minimised is the multinomial loss fit\nacross the entire probability distribution, *even when the data is\nbinary*. 'multinomial' is unavailable when solver='liblinear'.\n'auto' selects 'ovr' if the data is binary, or if solver='liblinear',\nand otherwise selects 'multinomial'.\n\n.. versionadded:: 0.18\n Stochastic Average Gradient descent solver for 'multinomial' case.\n.. versionchanged:: 0.22\n Default changed from 'ovr' to 'auto' in 0.22." - } - }, - "verbose": { - "refined_type": { - "kind": "BoundaryType", - "baseType": "int", - "min": 0, - "minInclusive": false, - "max": null, - "maxInclusive": false - }, - "docstring": { - "type": "int, default=0", - "description": "For the liblinear and lbfgs solvers set verbose to any positive\nnumber for verbosity." - } - }, - "l1_ratio": { - "refined_type": { - "kind": "BoundaryType", - "baseType": "float", - "min": 0, - "minInclusive": true, - "max": 1, - "maxInclusive": true - }, - "docstring": { - "type": "float, default=None", - "description": "The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only\nused if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent\nto using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent\nto using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a\ncombination of L1 and L2."
- } - } - } -} diff --git a/src/refined_types/sklearn/linear_model/Ridge.json b/src/refined_types/sklearn/linear_model/Ridge.json deleted file mode 100644 index 27e71bcf..00000000 --- a/src/refined_types/sklearn/linear_model/Ridge.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "sklearn.linear_model.Ridge": { - "solver": { - "refined_type": { - "kind": "EnumType", - "values": ["sparse_cg", "sag", "cholesky", "lsqr", "auto", "svd", "lbfgs", "saga"] - }, - "docstring": { - "type": "{'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga', 'lbfgs'}, default='auto'", - "description": "Solver to use in the computational routines:\n\n- 'auto' chooses the solver automatically based on the type of data.\n\n- 'svd' uses a Singular Value Decomposition of X to compute the Ridge\n coefficients. More stable for singular matrices than 'cholesky'.\n\n- 'cholesky' uses the standard scipy.linalg.solve function to\n obtain a closed-form solution.\n\n- 'sparse_cg' uses the conjugate gradient solver as found in\n scipy.sparse.linalg.cg. As an iterative algorithm, this solver is\n more appropriate than 'cholesky' for large-scale data\n (possibility to set `tol` and `max_iter`).\n\n- 'lsqr' uses the dedicated regularized least-squares routine\n scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative\n procedure.\n\n- 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses\n its improved, unbiased version named SAGA. Both methods also use an\n iterative procedure, and are often faster than other solvers when\n both n_samples and n_features are large. Note that 'sag' and\n 'saga' fast convergence is only guaranteed on features with\n approximately the same scale. You can preprocess the data with a\n scaler from sklearn.preprocessing.\n\n- 'lbfgs' uses L-BFGS-B algorithm implemented in\n `scipy.optimize.minimize`. It can be used only when `positive`\n is True.\n\nAll last six solvers support both dense and sparse data. However, only\n'sag', 'sparse_cg', and 'lbfgs' support sparse input when `fit_intercept`\nis True.\n\n.. versionadded:: 0.17\n Stochastic Average Gradient descent solver.\n.. versionadded:: 0.19\n SAGA solver." - } - } - } -} diff --git a/src/refined_types/sklearn/linear_model/SGDClassifier.json b/src/refined_types/sklearn/linear_model/SGDClassifier.json deleted file mode 100644 index 085145e6..00000000 --- a/src/refined_types/sklearn/linear_model/SGDClassifier.json +++ /dev/null @@ -1,91 +0,0 @@ -{ - "sklearn.linear_model.SGDClassifier": { - "loss": { - "refined_type": { - "kind": "EnumType", - "values": [ - "hinge", - "log", - "modified_huber", - "squared_hinge", - "perceptron", - "squared_error", - "huber", - "epsilon_insensitive", - "squared_epsilon_insensitive" - ] - }, - "docstring": { - "type": "str, default='hinge'", - "description": "The loss function to be used. 
Defaults to 'hinge', which gives a\nlinear SVM.\n\nThe possible options are 'hinge', 'log', 'modified_huber',\n'squared_hinge', 'perceptron', or a regression loss: 'squared_error',\n'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.\n\nThe 'log' loss gives logistic regression, a probabilistic classifier.\n'modified_huber' is another smooth loss that brings tolerance to\noutliers as well as probability estimates.\n'squared_hinge' is like hinge but is quadratically penalized.\n'perceptron' is the linear loss used by the perceptron algorithm.\nThe other losses are designed for regression but can be useful in\nclassification as well; see\n:class:`~sklearn.linear_model.SGDRegressor` for a description.\n\nMore details about the losses formulas can be found in the\n:ref:`User Guide `.\n\n.. deprecated:: 1.0\n The loss 'squared_loss' was deprecated in v1.0 and will be removed\n in version 1.2. Use `loss='squared_error'` which is equivalent." - } - }, - "penalty": { - "refined_type": { - "kind": "EnumType", - "values": ["elasticnet", "l1", "l2"] - }, - "docstring": { - "type": "{'l2', 'l1', 'elasticnet'}, default='l2'", - "description": "The penalty (aka regularization term) to be used. Defaults to 'l2'\nwhich is the standard regularizer for linear SVM models. 'l1' and\n'elasticnet' might bring sparsity to the model (feature selection)\nnot achievable with 'l2'." - } - }, - "l1_ratio": { - "refined_type": { - "kind": "BoundaryType", - "baseType": "float", - "min": 0, - "minInclusive": true, - "max": 1, - "maxInclusive": true - }, - "docstring": { - "type": "float, default=0.15", - "description": "The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.\nl1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.\nOnly used if `penalty` is 'elasticnet'." - } - }, - "learning_rate": { - "refined_type": { - "kind": "EnumType", - "values": ["constant", "optimal", "invscaling", "adaptive"] - }, - "docstring": { - "type": "str, default='optimal'", - "description": "The learning rate schedule:\n\n- 'constant': `eta = eta0`\n- 'optimal': `eta = 1.0 / (alpha * (t + t0))`\n where t0 is chosen by a heuristic proposed by Leon Bottou.\n- 'invscaling': `eta = eta0 / pow(t, power_t)`\n- 'adaptive': eta = eta0, as long as the training keeps decreasing.\n Each time n_iter_no_change consecutive epochs fail to decrease the\n training loss by tol or fail to increase validation score by tol if\n early_stopping is True, the current learning rate is divided by 5.\n\n .. versionadded:: 0.20\n Added 'adaptive' option" - } - }, - "validation_fraction": { - "refined_type": { - "kind": "BoundaryType", - "baseType": "float", - "min": 0, - "minInclusive": false, - "max": 1, - "maxInclusive": false - }, - "docstring": { - "type": "float, default=0.1", - "description": "The proportion of training data to set aside as validation set for\nearly stopping. Must be between 0 and 1.\nOnly used if `early_stopping` is True.\n\n.. versionadded:: 0.20\n Added 'validation_fraction' option" - } - }, - "class_weight": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "NamedType", - "name": "dict" - }, - { - "kind": "EnumType", - "values": ["balanced"] - } - ] - }, - "docstring": { - "type": "dict, {class_label: weight} or \"balanced\", default=None", - "description": "Preset for the class_weight fit parameter.\n\nWeights associated with classes. 
If not given, all classes\nare supposed to have weight one.\n\nThe \"balanced\" mode uses the values of y to automatically adjust\nweights inversely proportional to class frequencies in the input data\nas ``n_samples / (n_classes * np.bincount(y))``." - } - } - } -} diff --git a/src/refined_types/sklearn/metrics/accuracy_score.json b/src/refined_types/sklearn/metrics/accuracy_score.json deleted file mode 100644 index 54721510..00000000 --- a/src/refined_types/sklearn/metrics/accuracy_score.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "sklearn.metrics.accuracy_score": {} -} diff --git a/src/refined_types/sklearn/metrics/classification_report.json b/src/refined_types/sklearn/metrics/classification_report.json deleted file mode 100644 index 6645ecd7..00000000 --- a/src/refined_types/sklearn/metrics/classification_report.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "sklearn.metrics.classification_report": { - "zero_division": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "EnumType", - "values": ["warn"] - }, - { - "kind": "NamedType", - "name": "0" - }, - { - "kind": "NamedType", - "name": "1" - } - ] - }, - "docstring": { - "type": "\"warn\", 0 or 1, default=\"warn\"", - "description": "Sets the value to return when there is a zero division. If set to\n\"warn\", this acts as 0, but warnings are also raised." - } - } - } -} diff --git a/src/refined_types/sklearn/metrics/cohen_kappa_score.json b/src/refined_types/sklearn/metrics/cohen_kappa_score.json deleted file mode 100644 index 0f9f4740..00000000 --- a/src/refined_types/sklearn/metrics/cohen_kappa_score.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "sklearn.metrics.cohen_kappa_score": { - "weights": { - "refined_type": { - "kind": "EnumType", - "values": ["linear", "quadratic"] - }, - "docstring": { - "type": "{'linear', 'quadratic'}, default=None", - "description": "Weighting type to calculate the score. `None` means no weighted;\n\"linear\" means linear weighted; \"quadratic\" means quadratic weighted." - } - } - } -} diff --git a/src/refined_types/sklearn/metrics/confusion_matrix.json b/src/refined_types/sklearn/metrics/confusion_matrix.json deleted file mode 100644 index 563dfc10..00000000 --- a/src/refined_types/sklearn/metrics/confusion_matrix.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "sklearn.metrics.confusion_matrix": { - "normalize": { - "refined_type": { - "kind": "EnumType", - "values": ["true", "pred", "all"] - }, - "docstring": { - "type": "{'true', 'pred', 'all'}, default=None", - "description": "Normalizes confusion matrix over the true (rows), predicted (columns)\nconditions or all the population. If None, confusion matrix will not be\nnormalized." - } - } - } -} diff --git a/src/refined_types/sklearn/metrics/f1_score.json b/src/refined_types/sklearn/metrics/f1_score.json deleted file mode 100644 index 73d894d3..00000000 --- a/src/refined_types/sklearn/metrics/f1_score.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "sklearn.metrics.f1_score": { - "average": { - "refined_type": { - "kind": "EnumType", - "values": ["micro", "macro", "samples", "weighted", "binary"] - }, - "docstring": { - "type": "{'micro', 'macro', 'samples','weighted', 'binary'} or None, default='binary'", - "description": "This parameter is required for multiclass/multilabel targets.\nIf ``None``, the scores for each class are returned. 
Otherwise, this\ndetermines the type of averaging performed on the data:\n\n``'binary'``:\n Only report results for the class specified by ``pos_label``.\n This is applicable only if targets (``y_{true,pred}``) are binary.\n``'micro'``:\n Calculate metrics globally by counting the total true positives,\n false negatives and false positives.\n``'macro'``:\n Calculate metrics for each label, and find their unweighted\n mean. This does not take label imbalance into account.\n``'weighted'``:\n Calculate metrics for each label, and find their average weighted\n by support (the number of true instances for each label). This\n alters 'macro' to account for label imbalance; it can result in an\n F-score that is not between precision and recall.\n``'samples'``:\n Calculate metrics for each instance, and find their average (only\n meaningful for multilabel classification where this differs from\n :func:`accuracy_score`)." - } - }, - "zero_division": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "EnumType", - "values": ["warn"] - }, - { - "kind": "NamedType", - "name": "0" - }, - { - "kind": "NamedType", - "name": "1" - } - ] - }, - "docstring": { - "type": "\"warn\", 0 or 1, default=\"warn\"", - "description": "Sets the value to return when there is a zero division, i.e. when all\npredictions and labels are negative. If set to \"warn\", this acts as 0,\nbut warnings are also raised." - } - } - } -} diff --git a/src/refined_types/sklearn/metrics/mean_absolute_error.json b/src/refined_types/sklearn/metrics/mean_absolute_error.json deleted file mode 100644 index 133d944f..00000000 --- a/src/refined_types/sklearn/metrics/mean_absolute_error.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "sklearn.metrics.mean_absolute_error": { - "multioutput": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "EnumType", - "values": ["raw_values", "uniform_average"] - }, - { - "kind": "NamedType", - "name": "array-like" - } - ] - }, - "docstring": { - "type": "{'raw_values', 'uniform_average'} or array-like of shape (n_outputs,), default='uniform_average'", - "description": "Defines aggregating of multiple output values.\nArray-like value defines weights used to average errors.\n\n'raw_values' :\n Returns a full set of errors in case of multioutput input.\n\n'uniform_average' :\n Errors of all outputs are averaged with uniform weight." - } - } - } -} diff --git a/src/refined_types/sklearn/metrics/mean_squared_error.json b/src/refined_types/sklearn/metrics/mean_squared_error.json deleted file mode 100644 index 581cec2e..00000000 --- a/src/refined_types/sklearn/metrics/mean_squared_error.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "sklearn.metrics.mean_squared_error": { - "multioutput": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "EnumType", - "values": ["raw_values", "uniform_average"] - }, - { - "kind": "NamedType", - "name": "array-like" - } - ] - }, - "docstring": { - "type": "{'raw_values', 'uniform_average'} or array-like of shape (n_outputs,), default='uniform_average'", - "description": "Defines aggregating of multiple output values.\nArray-like value defines weights used to average errors.\n\n'raw_values' :\n Returns a full set of errors in case of multioutput input.\n\n'uniform_average' :\n Errors of all outputs are averaged with uniform weight." 
- } - } - } -} diff --git a/src/refined_types/sklearn/metrics/mean_squared_log_error.json b/src/refined_types/sklearn/metrics/mean_squared_log_error.json deleted file mode 100644 index cc21d991..00000000 --- a/src/refined_types/sklearn/metrics/mean_squared_log_error.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "sklearn.metrics.mean_squared_log_error": { - "multioutput": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "EnumType", - "values": ["raw_values", "uniform_average"] - }, - { - "kind": "NamedType", - "name": "array-like" - } - ] - }, - "docstring": { - "type": "{'raw_values', 'uniform_average'} or array-like of shape (n_outputs,), default='uniform_average'", - "description": "Defines aggregating of multiple output values.\nArray-like value defines weights used to average errors.\n\n'raw_values' :\n Returns a full set of errors when the input is of multioutput\n format.\n\n'uniform_average' :\n Errors of all outputs are averaged with uniform weight." - } - } - } -} diff --git a/src/refined_types/sklearn/metrics/precision_score.json b/src/refined_types/sklearn/metrics/precision_score.json deleted file mode 100644 index 6eb279c5..00000000 --- a/src/refined_types/sklearn/metrics/precision_score.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "sklearn.metrics.precision_score": { - "average": { - "refined_type": { - "kind": "EnumType", - "values": ["micro", "macro", "samples", "weighted", "binary"] - }, - "docstring": { - "type": "{'micro', 'macro', 'samples', 'weighted', 'binary'} or None, default='binary'", - "description": "This parameter is required for multiclass/multilabel targets.\nIf ``None``, the scores for each class are returned. Otherwise, this\ndetermines the type of averaging performed on the data:\n\n``'binary'``:\n Only report results for the class specified by ``pos_label``.\n This is applicable only if targets (``y_{true,pred}``) are binary.\n``'micro'``:\n Calculate metrics globally by counting the total true positives,\n false negatives and false positives.\n``'macro'``:\n Calculate metrics for each label, and find their unweighted\n mean. This does not take label imbalance into account.\n``'weighted'``:\n Calculate metrics for each label, and find their average weighted\n by support (the number of true instances for each label). This\n alters 'macro' to account for label imbalance; it can result in an\n F-score that is not between precision and recall.\n``'samples'``:\n Calculate metrics for each instance, and find their average (only\n meaningful for multilabel classification where this differs from\n :func:`accuracy_score`)." - } - }, - "zero_division": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "EnumType", - "values": ["warn"] - }, - { - "kind": "NamedType", - "name": "0" - }, - { - "kind": "NamedType", - "name": "1" - } - ] - }, - "docstring": { - "type": "\"warn\", 0 or 1, default=\"warn\"", - "description": "Sets the value to return when there is a zero division. If set to\n\"warn\", this acts as 0, but warnings are also raised." 
- } - } - } -} diff --git a/src/refined_types/sklearn/metrics/r2_score.json b/src/refined_types/sklearn/metrics/r2_score.json deleted file mode 100644 index d8b8cb64..00000000 --- a/src/refined_types/sklearn/metrics/r2_score.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "sklearn.metrics.r2_score": { - "multioutput": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "EnumType", - "values": ["raw_values", "uniform_average", "variance_weighted"] - }, - { - "kind": "NamedType", - "name": "array-like" - } - ] - }, - "docstring": { - "type": "{'raw_values', 'uniform_average', 'variance_weighted'}, array-like of shape (n_outputs,) or None, default='uniform_average'", - "description": "Defines aggregating of multiple output scores.\nArray-like value defines weights used to average scores.\nDefault is \"uniform_average\".\n\n'raw_values' :\n Returns a full set of scores in case of multioutput input.\n\n'uniform_average' :\n Scores of all outputs are averaged with uniform weight.\n\n'variance_weighted' :\n Scores of all outputs are averaged, weighted by the variances\n of each individual output.\n\n.. versionchanged:: 0.19\n Default value of multioutput is 'uniform_average'." - } - } - } -} diff --git a/src/refined_types/sklearn/metrics/recall_score.json b/src/refined_types/sklearn/metrics/recall_score.json deleted file mode 100644 index 85465801..00000000 --- a/src/refined_types/sklearn/metrics/recall_score.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "sklearn.metrics.recall_score": { - "average": { - "refined_type": { - "kind": "EnumType", - "values": ["micro", "macro", "samples", "weighted", "binary"] - }, - "docstring": { - "type": "{'micro', 'macro', 'samples', 'weighted', 'binary'} or None, default='binary'", - "description": "This parameter is required for multiclass/multilabel targets.\nIf ``None``, the scores for each class are returned. Otherwise, this\ndetermines the type of averaging performed on the data:\n\n``'binary'``:\n Only report results for the class specified by ``pos_label``.\n This is applicable only if targets (``y_{true,pred}``) are binary.\n``'micro'``:\n Calculate metrics globally by counting the total true positives,\n false negatives and false positives.\n``'macro'``:\n Calculate metrics for each label, and find their unweighted\n mean. This does not take label imbalance into account.\n``'weighted'``:\n Calculate metrics for each label, and find their average weighted\n by support (the number of true instances for each label). This\n alters 'macro' to account for label imbalance; it can result in an\n F-score that is not between precision and recall. Weighted recall\n is equal to accuracy.\n``'samples'``:\n Calculate metrics for each instance, and find their average (only\n meaningful for multilabel classification where this differs from\n :func:`accuracy_score`)." - } - }, - "zero_division": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "EnumType", - "values": ["warn"] - }, - { - "kind": "NamedType", - "name": "0" - }, - { - "kind": "NamedType", - "name": "1" - } - ] - }, - "docstring": { - "type": "\"warn\", 0 or 1, default=\"warn\"", - "description": "Sets the value to return when there is a zero division. If set to\n\"warn\", this acts as 0, but warnings are also raised." 
- } - } - } -} diff --git a/src/refined_types/sklearn/metrics/roc_auc_score.json b/src/refined_types/sklearn/metrics/roc_auc_score.json deleted file mode 100644 index 948f9bdd..00000000 --- a/src/refined_types/sklearn/metrics/roc_auc_score.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - "sklearn.metrics.roc_auc_score": { - "average": { - "refined_type": { - "kind": "EnumType", - "values": ["micro", "macro", "samples", "weighted"] - }, - "docstring": { - "type": "{'micro', 'macro', 'samples', 'weighted'} or None, default='macro'", - "description": "If ``None``, the scores for each class are returned. Otherwise,\nthis determines the type of averaging performed on the data:\nNote: multiclass ROC AUC currently only handles the 'macro' and\n'weighted' averages.\n\n``'micro'``:\n Calculate metrics globally by considering each element of the label\n indicator matrix as a label.\n``'macro'``:\n Calculate metrics for each label, and find their unweighted\n mean. This does not take label imbalance into account.\n``'weighted'``:\n Calculate metrics for each label, and find their average, weighted\n by support (the number of true instances for each label).\n``'samples'``:\n Calculate metrics for each instance, and find their average.\n\nWill be ignored when ``y_true`` is binary." - } - }, - "max_fpr": { - "refined_type": { - "kind": "BoundaryType", - "baseType": "float", - "min": 0, - "max": 1, - "minInclusive": false, - "maxInclusive": true - }, - "docstring": { - "type": "float > 0 and <= 1, default=None", - "description": "If not ``None``, the standardized partial AUC [2]_ over the range\n[0, max_fpr] is returned. For the multiclass case, ``max_fpr``,\nshould be either equal to ``None`` or ``1.0`` as AUC ROC partial\ncomputation currently is not supported for multiclass." - } - }, - "multi_class": { - "refined_type": { - "kind": "EnumType", - "values": ["raise", "ovr", "ovo"] - }, - "docstring": { - "type": "{'raise', 'ovr', 'ovo'}, default='raise'", - "description": "Only used for multiclass targets. Determines the type of configuration\nto use. The default value raises an error, so either\n``'ovr'`` or ``'ovo'`` must be passed explicitly.\n\n``'ovr'``:\n Stands for One-vs-rest. Computes the AUC of each class\n against the rest [3]_ [4]_. This\n treats the multiclass case in the same way as the multilabel case.\n Sensitive to class imbalance even when ``average == 'macro'``,\n because class imbalance affects the composition of each of the\n 'rest' groupings.\n``'ovo'``:\n Stands for One-vs-one. Computes the average AUC of all\n possible pairwise combinations of classes [5]_.\n Insensitive to class imbalance when\n ``average == 'macro'``." 
- } - } - } -} diff --git a/src/refined_types/sklearn/metrics/roc_curve.json b/src/refined_types/sklearn/metrics/roc_curve.json deleted file mode 100644 index 04b6aaa1..00000000 --- a/src/refined_types/sklearn/metrics/roc_curve.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "sklearn.metrics.roc_curve": {} -} diff --git a/src/refined_types/sklearn/model_selection/GridSearchCV.json b/src/refined_types/sklearn/model_selection/GridSearchCV.json deleted file mode 100644 index 849b9c1a..00000000 --- a/src/refined_types/sklearn/model_selection/GridSearchCV.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "sklearn.model_selection.GridSearchCV": { - "error_score": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "EnumType", - "values": ["raise"] - }, - { - "kind": "NamedType", - "name": "numeric" - } - ] - }, - "docstring": { - "type": "'raise' or numeric, default=np.nan", - "description": "Value to assign to the score if an error occurs in estimator fitting.\nIf set to 'raise', the error is raised. If a numeric value is given,\nFitFailedWarning is raised. This parameter does not affect the refit\nstep, which will always raise the error." - } - } - } -} diff --git a/src/refined_types/sklearn/model_selection/GroupKFold.json b/src/refined_types/sklearn/model_selection/GroupKFold.json deleted file mode 100644 index 9714d861..00000000 --- a/src/refined_types/sklearn/model_selection/GroupKFold.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "sklearn.model_selection.GroupKFold": { - "n_splits": { - "refined_type": { - "kind": "BoundaryType", - "baseType": "int", - "min": 2, - "minInclusive": true, - "max": null, - "maxInclusive": false - }, - "docstring": { - "type": "int, default=5", - "description": "Number of folds. Must be at least 2.\n\n.. versionchanged:: 0.22\n ``n_splits`` default value changed from 3 to 5." - } - } - } -} diff --git a/src/refined_types/sklearn/model_selection/KFold.json b/src/refined_types/sklearn/model_selection/KFold.json deleted file mode 100644 index d6ce6b1f..00000000 --- a/src/refined_types/sklearn/model_selection/KFold.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "sklearn.model_selection.KFold": { - "n_splits": { - "refined_type": { - "kind": "BoundaryType", - "baseType": "int", - "min": 2, - "minInclusive": true, - "max": null, - "maxInclusive": false - }, - "docstring": { - "type": "int, default=5", - "description": "Number of folds. Must be at least 2.\n\n.. versionchanged:: 0.22\n ``n_splits`` default value changed from 3 to 5." - } - } - } -} diff --git a/src/refined_types/sklearn/model_selection/RandomizedSearchCV.json b/src/refined_types/sklearn/model_selection/RandomizedSearchCV.json deleted file mode 100644 index d85f7a4b..00000000 --- a/src/refined_types/sklearn/model_selection/RandomizedSearchCV.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "sklearn.model_selection.RandomizedSearchCV": { - "error_score": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "EnumType", - "values": ["raise"] - }, - { - "kind": "NamedType", - "name": "numeric" - } - ] - }, - "docstring": { - "type": "'raise' or numeric, default=np.nan", - "description": "Value to assign to the score if an error occurs in estimator fitting.\nIf set to 'raise', the error is raised. If a numeric value is given,\nFitFailedWarning is raised. This parameter does not affect the refit\nstep, which will always raise the error." 
- } - } - } -} diff --git a/src/refined_types/sklearn/model_selection/StratifiedKFold.json b/src/refined_types/sklearn/model_selection/StratifiedKFold.json deleted file mode 100644 index 215d3db0..00000000 --- a/src/refined_types/sklearn/model_selection/StratifiedKFold.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "sklearn.model_selection.StratifiedKFold": { - "n_splits": { - "refined_type": { - "kind": "BoundaryType", - "baseType": "int", - "min": 2, - "minInclusive": true, - "max": null, - "maxInclusive": false - }, - "docstring": { - "type": "int, default=5", - "description": "Number of folds. Must be at least 2.\n\n.. versionchanged:: 0.22\n ``n_splits`` default value changed from 3 to 5." - } - } - } -} diff --git a/src/refined_types/sklearn/model_selection/cross_val_score.json b/src/refined_types/sklearn/model_selection/cross_val_score.json deleted file mode 100644 index cb101aea..00000000 --- a/src/refined_types/sklearn/model_selection/cross_val_score.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "sklearn.model_selection.cross_val_score": { - "error_score": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "EnumType", - "values": ["raise"] - }, - { - "kind": "NamedType", - "name": "numeric" - } - ] - }, - "docstring": { - "type": "'raise' or numeric, default=np.nan", - "description": "Value to assign to the score if an error occurs in estimator fitting.\nIf set to 'raise', the error is raised.\nIf a numeric value is given, FitFailedWarning is raised.\n\n.. versionadded:: 0.20" - } - } - } -} diff --git a/src/refined_types/sklearn/model_selection/train_test_split.json b/src/refined_types/sklearn/model_selection/train_test_split.json deleted file mode 100644 index d592fbb2..00000000 --- a/src/refined_types/sklearn/model_selection/train_test_split.json +++ /dev/null @@ -1,50 +0,0 @@ -{ - "sklearn.model_selection.train_test_split": { - "test_size": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "BoundaryType", - "baseType": "float", - "min": 0, - "max": 1, - "minInclusive": true, - "maxInclusive": true - }, - { - "kind": "NamedType", - "name": "int" - } - ] - }, - "docstring": { - "type": "float or int, default=None", - "description": "If float, should be between 0.0 and 1.0 and represent the proportion\nof the dataset to include in the test split. If int, represents the\nabsolute number of test samples. If None, the value is set to the\ncomplement of the train size. If ``train_size`` is also None, it will\nbe set to 0.25." - } - }, - "train_size": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "BoundaryType", - "baseType": "float", - "min": 0, - "max": 1, - "minInclusive": true, - "maxInclusive": true - }, - { - "kind": "NamedType", - "name": "int" - } - ] - }, - "docstring": { - "type": "float or int, default=None", - "description": "If float, should be between 0.0 and 1.0 and represent the\nproportion of the dataset to include in the train split. If\nint, represents the absolute number of train samples. If None,\nthe value is automatically set to the complement of the test size." 
- } - } - } -} diff --git a/src/refined_types/sklearn/naive_bayes/GaussianNB.json b/src/refined_types/sklearn/naive_bayes/GaussianNB.json deleted file mode 100644 index 47d4024e..00000000 --- a/src/refined_types/sklearn/naive_bayes/GaussianNB.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "sklearn.naive_bayes.GaussianNB": {} -} diff --git a/src/refined_types/sklearn/naive_bayes/MultinomialNB.json b/src/refined_types/sklearn/naive_bayes/MultinomialNB.json deleted file mode 100644 index a7b75aad..00000000 --- a/src/refined_types/sklearn/naive_bayes/MultinomialNB.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "sklearn.naive_bayes.MultinomialNB": {} -} diff --git a/src/refined_types/sklearn/neighbors/KNeighborsRegressor.json b/src/refined_types/sklearn/neighbors/KNeighborsRegressor.json deleted file mode 100644 index 67590e26..00000000 --- a/src/refined_types/sklearn/neighbors/KNeighborsRegressor.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "sklearn.neighbors.KNeighborsRegressor": { - "weights": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "EnumType", - "values": ["uniform", "distance"] - }, - { - "kind": "NamedType", - "name": "callable" - } - ] - }, - "docstring": { - "type": "{'uniform', 'distance'} or callable, default='uniform'", - "description": "Weight function used in prediction. Possible values:\n\n- 'uniform' : uniform weights. All points in each neighborhood\n are weighted equally.\n- 'distance' : weight points by the inverse of their distance.\n in this case, closer neighbors of a query point will have a\n greater influence than neighbors which are further away.\n- [callable] : a user-defined function which accepts an\n array of distances, and returns an array of the same shape\n containing the weights.\n\nUniform weights are used by default." - } - }, - "algorithm": { - "refined_type": { - "kind": "EnumType", - "values": ["auto", "ball_tree", "kd_tree", "brute"] - }, - "docstring": { - "type": "{'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'", - "description": "Algorithm used to compute the nearest neighbors:\n\n- 'ball_tree' will use :class:`BallTree`\n- 'kd_tree' will use :class:`KDTree`\n- 'brute' will use a brute-force search.\n- 'auto' will attempt to decide the most appropriate algorithm\n based on the values passed to :meth:`fit` method.\n\nNote: fitting on sparse input will override the setting of\nthis parameter, using brute force." 
- } - } - } -} diff --git a/src/refined_types/sklearn/pipeline/FeatureUnion.json b/src/refined_types/sklearn/pipeline/FeatureUnion.json deleted file mode 100644 index 2b14d390..00000000 --- a/src/refined_types/sklearn/pipeline/FeatureUnion.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "sklearn.pipeline.FeatureUnion": {} -} diff --git a/src/refined_types/sklearn/pipeline/Pipeline.json b/src/refined_types/sklearn/pipeline/Pipeline.json deleted file mode 100644 index 3b634dd6..00000000 --- a/src/refined_types/sklearn/pipeline/Pipeline.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "sklearn.pipeline.Pipeline": {} -} diff --git a/src/refined_types/sklearn/preprocessing/LabelEncoder.json b/src/refined_types/sklearn/preprocessing/LabelEncoder.json deleted file mode 100644 index 7e729ec3..00000000 --- a/src/refined_types/sklearn/preprocessing/LabelEncoder.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "sklearn.preprocessing.LabelEncoder": {} -} diff --git a/src/refined_types/sklearn/preprocessing/MinMaxScaler.json b/src/refined_types/sklearn/preprocessing/MinMaxScaler.json deleted file mode 100644 index 09ce8bc5..00000000 --- a/src/refined_types/sklearn/preprocessing/MinMaxScaler.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "sklearn.preprocessing.MinMaxScaler": {} -} diff --git a/src/refined_types/sklearn/preprocessing/OneHotEncoder.json b/src/refined_types/sklearn/preprocessing/OneHotEncoder.json deleted file mode 100644 index 1843cc1e..00000000 --- a/src/refined_types/sklearn/preprocessing/OneHotEncoder.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "sklearn.preprocessing.OneHotEncoder": { - "categories": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "EnumType", - "values": ["auto"] - }, - { - "kind": "NamedType", - "name": "list of array-like" - } - ] - }, - "docstring": { - "type": "'auto' or a list of array-like, default='auto'", - "description": "Categories (unique values) per feature:\n\n- 'auto' : Determine categories automatically from the training data.\n- list : ``categories[i]`` holds the categories expected in the ith\n column. The passed categories should not mix strings and numeric\n values within a single feature, and should be sorted in case of\n numeric values.\n\nThe used categories can be found in the ``categories_`` attribute.\n\n.. versionadded:: 0.20" - } - }, - "drop": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "EnumType", - "values": ["first", "if_binary"] - }, - { - "kind": "NamedType", - "name": "array-like" - } - ] - }, - "docstring": { - "type": "{'first', 'if_binary'} or a array-like of shape (n_features,), default=None", - "description": "Specifies a methodology to use to drop one of the categories per\nfeature. This is useful in situations where perfectly collinear\nfeatures cause problems, such as when feeding the resulting data\ninto a neural network or an unregularized regression.\n\nHowever, dropping one category breaks the symmetry of the original\nrepresentation and can therefore induce a bias in downstream models,\nfor instance for penalized linear classification or regression models.\n\n- None : retain all features (the default).\n- 'first' : drop the first category in each feature. If only one\n category is present, the feature will be dropped entirely.\n- 'if_binary' : drop the first category in each feature with two\n categories. Features with 1 or more than 2 categories are\n left intact.\n- array : ``drop[i]`` is the category in feature ``X[:, i]`` that\n should be dropped.\n\n.. 
versionadded:: 0.21\n The parameter `drop` was added in 0.21.\n\n.. versionchanged:: 0.23\n The option `drop='if_binary'` was added in 0.23." - } - }, - "handle_unknown": { - "refined_type": { - "kind": "EnumType", - "values": ["error", "ignore"] - }, - "docstring": { - "type": "{'error', 'ignore'}, default='error'", - "description": "Whether to raise an error or ignore if an unknown categorical feature\nis present during transform (default is to raise). When this parameter\nis set to 'ignore' and an unknown category is encountered during\ntransform, the resulting one-hot encoded columns for this feature\nwill be all zeros. In the inverse transform, an unknown category\nwill be denoted as None." - } - } - } -} diff --git a/src/refined_types/sklearn/preprocessing/PolynomialFeatures.json b/src/refined_types/sklearn/preprocessing/PolynomialFeatures.json deleted file mode 100644 index 5b9ba4fb..00000000 --- a/src/refined_types/sklearn/preprocessing/PolynomialFeatures.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "sklearn.preprocessing.PolynomialFeatures": { - "order": { - "refined_type": { - "kind": "EnumType", - "values": ["C", "F"] - }, - "docstring": { - "type": "{'C', 'F'}, default='C'", - "description": "Order of output array in the dense case. `'F'` order is faster to\ncompute, but may slow down subsequent estimators.\n\n.. versionadded:: 0.21" - } - } - } -} diff --git a/src/refined_types/sklearn/preprocessing/RobustScaler.json b/src/refined_types/sklearn/preprocessing/RobustScaler.json deleted file mode 100644 index dfa68cf0..00000000 --- a/src/refined_types/sklearn/preprocessing/RobustScaler.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "sklearn.preprocessing.RobustScaler": { - "quantile_range": { - "refined_type": { - "kind": "BoundaryType", - "baseType": "tuple", - "min": 0, - "max": 100, - "minInclusive": false, - "maxInclusive": false - }, - "docstring": { - "type": "tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0, default=(25.0, 75.0)", - "description": "Quantile range used to calculate `scale_`. By default this is equal to\nthe IQR, i.e., `q_min` is the first quantile and `q_max` is the third\nquantile.\n\n.. versionadded:: 0.18" - } - } - } -} diff --git a/src/refined_types/sklearn/preprocessing/StandardScaler.json b/src/refined_types/sklearn/preprocessing/StandardScaler.json deleted file mode 100644 index c81e8174..00000000 --- a/src/refined_types/sklearn/preprocessing/StandardScaler.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "sklearn.preprocessing.StandardScaler": {} -} diff --git a/src/refined_types/sklearn/svm/SVC.json b/src/refined_types/sklearn/svm/SVC.json deleted file mode 100644 index 41e202a9..00000000 --- a/src/refined_types/sklearn/svm/SVC.json +++ /dev/null @@ -1,85 +0,0 @@ -{ - "sklearn.svm.SVC": { - "C": { - "refined_type": { - "kind": "BoundaryType", - "baseType": "float", - "min": 0, - "minInclusive": false, - "max": null, - "maxInclusive": false - }, - "docstring": { - "type": "float, default=1.0", - "description": "Regularization parameter. The strength of the regularization is\ninversely proportional to C. Must be strictly positive. The penalty\nis a squared l2 penalty." 
- } - }, - "kernel": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "EnumType", - "values": ["linear", "poly", "rbf", "sigmoid", "precomputed"] - }, - { - "kind": "NamedType", - "name": "callable" - } - ] - }, - "docstring": { - "type": "{'linear', 'poly', 'rbf', 'sigmoid', 'precomputed'}, default='rbf'", - "description": "Specifies the kernel type to be used in the algorithm.\nIt must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or\na callable.\nIf none is given, 'rbf' will be used. If a callable is given it is\nused to pre-compute the kernel matrix from data matrices; that matrix\nshould be an array of shape ``(n_samples, n_samples)``." - } - }, - "gamma": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "EnumType", - "values": ["scale", "auto"] - }, - { - "kind": "NamedType", - "name": "float" - } - ] - }, - "docstring": { - "type": "{'scale', 'auto'} or float, default='scale'", - "description": "Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.\n\n- if ``gamma='scale'`` (default) is passed then it uses\n 1 / (n_features * X.var()) as value of gamma,\n- if 'auto', uses 1 / n_features.\n\n.. versionchanged:: 0.22\n The default value of ``gamma`` changed from 'auto' to 'scale'." - } - }, - "class_weight": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "NamedType", - "name": "dict" - }, - { - "kind": "EnumType", - "values": ["balanced"] - } - ] - }, - "docstring": { - "type": "dict or 'balanced', default=None", - "description": "Set the parameter C of class i to class_weight[i]*C for\nSVC. If not given, all classes are supposed to have\nweight one.\nThe \"balanced\" mode uses the values of y to automatically adjust\nweights inversely proportional to class frequencies in the input data\nas ``n_samples / (n_classes * np.bincount(y))``." - } - }, - "decision_function_shape": { - "refined_type": { - "kind": "EnumType", - "values": ["ovo", "ovr"] - }, - "docstring": { - "type": "{'ovo', 'ovr'}, default='ovr'", - "description": "Whether to return a one-vs-rest ('ovr') decision function of shape\n(n_samples, n_classes) as all other classifiers, or the original\none-vs-one ('ovo') decision function of libsvm which has shape\n(n_samples, n_classes * (n_classes - 1) / 2). However, one-vs-one\n('ovo') is always used as multi-class strategy. The parameter is\nignored for binary classification.\n\n.. versionchanged:: 0.19\n decision_function_shape is 'ovr' by default.\n\n.. versionadded:: 0.17\n *decision_function_shape='ovr'* is recommended.\n\n.. versionchanged:: 0.17\n Deprecated *decision_function_shape='ovo' and None*." - } - } - } -} diff --git a/src/refined_types/sklearn/tree/DecisionTreeClassifier.json b/src/refined_types/sklearn/tree/DecisionTreeClassifier.json deleted file mode 100644 index 417ff39e..00000000 --- a/src/refined_types/sklearn/tree/DecisionTreeClassifier.json +++ /dev/null @@ -1,84 +0,0 @@ -{ - "sklearn.tree.DecisionTreeClassifier": { - "criterion": { - "refined_type": { - "kind": "EnumType", - "values": ["gini", "entropy"] - }, - "docstring": { - "type": "{\"gini\", \"entropy\"}, default=\"gini\"", - "description": "The function to measure the quality of a split. Supported criteria are\n\"gini\" for the Gini impurity and \"entropy\" for the information gain." 
- } - }, - "splitter": { - "refined_type": { - "kind": "EnumType", - "values": ["best", "random"] - }, - "docstring": { - "type": "{\"best\", \"random\"}, default=\"best\"", - "description": "The strategy used to choose the split at each node. Supported\nstrategies are \"best\" to choose the best split and \"random\" to choose\nthe best random split." - } - }, - "max_features": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "NamedType", - "name": "int" - }, - { - "kind": "NamedType", - "name": "float" - }, - { - "kind": "EnumType", - "values": ["auto", "sqrt", "log2"] - } - ] - }, - "docstring": { - "type": "int, float or {\"auto\", \"sqrt\", \"log2\"}, default=None", - "description": "The number of features to consider when looking for the best split:\n\n - If int, then consider `max_features` features at each split.\n - If float, then `max_features` is a fraction and\n `int(max_features * n_features)` features are considered at each\n split.\n - If \"auto\", then `max_features=sqrt(n_features)`.\n - If \"sqrt\", then `max_features=sqrt(n_features)`.\n - If \"log2\", then `max_features=log2(n_features)`.\n - If None, then `max_features=n_features`.\n\nNote: the search for a split does not stop until at least one\nvalid partition of the node samples is found, even if it requires to\neffectively inspect more than ``max_features`` features." - } - }, - "class_weight": { - "refined_type": { - "kind": "UnionType", - "types": [ - { - "kind": "NamedType", - "name": "dict" - }, - { - "kind": "NamedType", - "name": "list of dicts" - }, - { - "kind": "EnumType", - "values": ["balanced"] - } - ] - }, - "docstring": { - "type": "dict, list of dict or \"balanced\", default=None", - "description": "Weights associated with classes in the form ``{class_label: weight}``.\nIf None, all classes are supposed to have weight one. For\nmulti-output problems, a list of dicts can be provided in the same\norder as the columns of y.\n\nNote that for multioutput (including multilabel) weights should be\ndefined for each class of every column in its own dict. For example,\nfor four-class multilabel classification weights should be\n[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of\n[{1:1}, {2:5}, {3:1}, {4:1}].\n\nThe \"balanced\" mode uses the values of y to automatically adjust\nweights inversely proportional to class frequencies in the input data\nas ``n_samples / (n_classes * np.bincount(y))``\n\nFor multi-output, the weights of each column of y will be multiplied.\n\nNote that these weights will be multiplied with sample_weight (passed\nthrough the fit method) if sample_weight is specified." - } - }, - "ccp_alpha": { - "refined_type": { - "kind": "BoundaryType", - "baseType": "float", - "min": 0, - "minInclusive": true, - "max": null, - "maxInclusive": false - }, - "docstring": { - "type": "non-negative float, default=0.0", - "description": "Complexity parameter used for Minimal Cost-Complexity Pruning. The\nsubtree with the largest cost complexity that is smaller than\n``ccp_alpha`` will be chosen. By default, no pruning is performed. See\n:ref:`minimal_cost_complexity_pruning` for details.\n\n.. 
versionadded:: 0.22" - } - } - } -} diff --git a/src/refined_types/sklearn/utils/shuffle.json b/src/refined_types/sklearn/utils/shuffle.json deleted file mode 100644 index 1e6ac18c..00000000 --- a/src/refined_types/sklearn/utils/shuffle.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "sklearn.utils.shuffle": {} -} diff --git a/tests/data/migration/apiv1_data.json b/tests/data/migration/apiv1_data.json index 90f2cfac..c3393b99 100644 --- a/tests/data/migration/apiv1_data.json +++ b/tests/data/migration/apiv1_data.json @@ -24,6 +24,7 @@ "code": "class TestClass:\n \"\"\"This is a TestClass.\n It has no common use.\"\"\"\n pass", "instance_attributes": [ { + "id": "test/test/TestClass/a", "name": "a", "types": { "kind": "NamedType", @@ -31,6 +32,7 @@ } }, { + "id": "test/test/TestClass/b", "name": "b", "types": { "kind": "NamedType", @@ -38,6 +40,7 @@ } }, { + "id": "test/test/TestClass/c", "name": "c", "types": { "kind": "NamedType", diff --git a/tests/data/migration/apiv2_data.json b/tests/data/migration/apiv2_data.json index 14053a38..a9dd68ca 100644 --- a/tests/data/migration/apiv2_data.json +++ b/tests/data/migration/apiv2_data.json @@ -24,6 +24,7 @@ "code": "class TestClass:\n \"\"\"This is a TestClass.\n It has no common use.\"\"\"\n pass", "instance_attributes": [ { + "id": "test/test/TestClass/a", "name": "a", "types": { "kind": "NamedType", @@ -31,6 +32,7 @@ } }, { + "id": "test/test/TestClass/b", "name": "b", "types": { "kind": "NamedType", @@ -38,6 +40,7 @@ } }, { + "id": "test/test/TestClass/c", "name": "c", "types": { "kind": "NamedType", diff --git a/tests/library_analyzer/processing/annotations/model/test_annotations.py b/tests/library_analyzer/processing/annotations/model/test_annotations.py index e67329eb..2ecfd853 100644 --- a/tests/library_analyzer/processing/annotations/model/test_annotations.py +++ b/tests/library_analyzer/processing/annotations/model/test_annotations.py @@ -340,12 +340,12 @@ def test_annotation_store() -> None: }, }, } - assert annotations.to_json() == json_store - assert AnnotationStore.from_json(json_store).to_json() == json_store + assert annotations.to_dict() == json_store + assert AnnotationStore.from_dict(json_store).to_dict() == json_store @pytest.mark.parametrize( - ("annotation", "json"), + ("annotation", "d"), [ ( AbstractAnnotation( @@ -684,6 +684,6 @@ def test_annotation_store() -> None: "test import and export of todo annotation", ], ) -def test_conversion_between_json_and_annotation(annotation: AbstractAnnotation, json: dict) -> None: - assert annotation.to_json() == json - assert type(annotation).from_json(json) == annotation +def test_conversion_between_json_and_annotation(annotation: AbstractAnnotation, d: dict) -> None: + assert annotation.to_dict() == d + assert type(annotation).from_dict(d) == annotation diff --git a/tests/library_analyzer/processing/annotations/test_generate_annotations.py b/tests/library_analyzer/processing/annotations/test_generate_annotations.py index cc3c4a03..c92ccda8 100644 --- a/tests/library_analyzer/processing/annotations/test_generate_annotations.py +++ b/tests/library_analyzer/processing/annotations/test_generate_annotations.py @@ -22,7 +22,7 @@ def test_generate_annotations( usages, api, expected_annotations = read_test_data(subfolder) annotations = generate_annotations(api, usages) - assert annotations.to_json()[subfolder] == expected_annotations + assert annotations.to_dict()[subfolder] == expected_annotations def read_test_data(subfolder: str) -> tuple[UsageCountStore, API, dict]: @@ -34,11 +34,11 @@ def 
read_test_data(subfolder: str) -> tuple[UsageCountStore, API, dict]: with api_json_path.open(encoding="utf-8") as api_file: api_json = json.load(api_file) - api = API.from_json(api_json) + api = API.from_dict(api_json) with usages_json_path.open(encoding="utf-8") as usages_file: usages_json = json.load(usages_file) - usages = UsageCountStore.from_json(usages_json) + usages = UsageCountStore.from_dict(usages_json) with annotations_json_path.open(encoding="utf-8") as annotations_file: annotations_json = json.load(annotations_file) diff --git a/tests/library_analyzer/processing/api/docstring_parsing/test_epydoc_parser.py b/tests/library_analyzer/processing/api/docstring_parsing/test_epydoc_parser.py index b6bbf3a8..3083ba6b 100644 --- a/tests/library_analyzer/processing/api/docstring_parsing/test_epydoc_parser.py +++ b/tests/library_analyzer/processing/api/docstring_parsing/test_epydoc_parser.py @@ -2,10 +2,10 @@ import pytest from library_analyzer.processing.api.docstring_parsing import EpydocParser from library_analyzer.processing.api.model import ( - ClassDocumentation, - FunctionDocumentation, + ClassDocstring, + FunctionDocstring, ParameterAssignment, - ParameterDocumentation, + ParameterDocstring, ) @@ -36,14 +36,14 @@ class C: [ ( class_with_documentation, - ClassDocumentation( + ClassDocstring( description="Lorem ipsum. Code::\n\npass\n\nDolor sit amet.", full_docstring="Lorem ipsum. Code::\n\n pass\n\nDolor sit amet.", ), ), ( class_without_documentation, - ClassDocumentation( + ClassDocstring( description="", full_docstring="", ), @@ -57,7 +57,7 @@ class C: def test_get_class_documentation( epydoc_parser: EpydocParser, python_code: str, - expected_class_documentation: ClassDocumentation, + expected_class_documentation: ClassDocstring, ) -> None: node = astroid.extract_node(python_code) @@ -91,14 +91,14 @@ def f(): [ ( function_with_documentation, - FunctionDocumentation( + FunctionDocstring( description="Lorem ipsum. Code::\n\npass\n\nDolor sit amet.", full_docstring="Lorem ipsum. 
Code::\n\n pass\n\nDolor sit amet.", ), ), ( function_without_documentation, - FunctionDocumentation( + FunctionDocstring( description="", full_docstring="", ), @@ -112,7 +112,7 @@ def f(): def test_get_function_documentation( epydoc_parser: EpydocParser, python_code: str, - expected_function_documentation: FunctionDocumentation, + expected_function_documentation: FunctionDocstring, ) -> None: node = astroid.extract_node(python_code) @@ -166,7 +166,7 @@ def f(): class_with_parameters, "p", ParameterAssignment.POSITION_OR_NAME, - ParameterDocumentation( + ParameterDocstring( type="int", default_value="1", description="foo defaults to 1", @@ -176,7 +176,7 @@ def f(): class_with_parameters, "missing", ParameterAssignment.POSITION_OR_NAME, - ParameterDocumentation( + ParameterDocstring( type="", default_value="", description="", @@ -186,7 +186,7 @@ def f(): function_with_parameters, "no_type_no_default", ParameterAssignment.POSITION_OR_NAME, - ParameterDocumentation( + ParameterDocstring( type="", default_value="", description="no type and no default", @@ -196,7 +196,7 @@ def f(): function_with_parameters, "type_no_default", ParameterAssignment.POSITION_OR_NAME, - ParameterDocumentation( + ParameterDocstring( type="int", default_value="", description="type but no default", @@ -206,7 +206,7 @@ def f(): function_with_parameters, "with_default", ParameterAssignment.POSITION_OR_NAME, - ParameterDocumentation( + ParameterDocstring( type="int", default_value="2", description="foo that defaults to 2", @@ -216,7 +216,7 @@ def f(): function_with_parameters, "missing", ParameterAssignment.POSITION_OR_NAME, - ParameterDocumentation(type="", default_value="", description=""), + ParameterDocstring(type="", default_value="", description=""), ), ], ids=[ @@ -233,7 +233,7 @@ def test_get_parameter_documentation( python_code: str, parameter_name: str, parameter_assigned_by: ParameterAssignment, - expected_parameter_documentation: ParameterDocumentation, + expected_parameter_documentation: ParameterDocstring, ) -> None: node = astroid.extract_node(python_code) assert isinstance(node, astroid.ClassDef | astroid.FunctionDef) diff --git a/tests/library_analyzer/processing/api/docstring_parsing/test_googledoc_parser.py b/tests/library_analyzer/processing/api/docstring_parsing/test_googledoc_parser.py new file mode 100644 index 00000000..cc98332b --- /dev/null +++ b/tests/library_analyzer/processing/api/docstring_parsing/test_googledoc_parser.py @@ -0,0 +1,564 @@ +import astroid +import pytest +from library_analyzer.processing.api.docstring_parsing import GoogleDocParser +from library_analyzer.processing.api.model import ( + AttributeAssignment, + AttributeDocstring, + ClassDocstring, + FunctionDocstring, + ParameterAssignment, + ParameterDocstring, + ResultDocstring, +) + + +@pytest.fixture() +def googlestyledoc_parser() -> GoogleDocParser: + return GoogleDocParser() + + +# language=python +class_with_documentation = ''' +class C: + """ + Lorem ipsum. Code:: + + pass + + Dolor sit amet. + """ +''' + +# language=python +class_without_documentation = """ +class C: + pass +""" + + +@pytest.mark.parametrize( + ("python_code", "expected_class_documentation"), + [ + ( + class_with_documentation, + ClassDocstring( + description="Lorem ipsum. Code::\n\npass\n\nDolor sit amet.", + full_docstring="Lorem ipsum. 
Code::\n\n pass\n\nDolor sit amet.", + ), + ), + ( + class_without_documentation, + ClassDocstring( + description="", + full_docstring="", + ), + ), + ], + ids=[ + "class with documentation", + "class without documentation", + ], +) +def test_get_class_documentation( + googlestyledoc_parser: GoogleDocParser, + python_code: str, + expected_class_documentation: ClassDocstring, +) -> None: + node = astroid.extract_node(python_code) + + assert isinstance(node, astroid.ClassDef) + assert googlestyledoc_parser.get_class_documentation(node) == expected_class_documentation + + +# language=python +function_with_documentation = ''' +def f(): + """ + Lorem ipsum. Code:: + + pass + + Dolor sit amet. + """ + + pass +''' + +# language=python +function_without_documentation = """ +def f(): + pass +""" + + +@pytest.mark.parametrize( + ("python_code", "expected_function_documentation"), + [ + ( + function_with_documentation, + FunctionDocstring( + description="Lorem ipsum. Code::\n\npass\n\nDolor sit amet.", + full_docstring="Lorem ipsum. Code::\n\n pass\n\nDolor sit amet.", + ), + ), + ( + function_without_documentation, + FunctionDocstring( + description="", + full_docstring="", + ), + ), + ], + ids=[ + "function with documentation", + "function without documentation", + ], +) +def test_get_function_documentation( + googlestyledoc_parser: GoogleDocParser, + python_code: str, + expected_function_documentation: FunctionDocstring, +) -> None: + node = astroid.extract_node(python_code) + + assert isinstance(node, astroid.FunctionDef) + assert googlestyledoc_parser.get_function_documentation(node) == expected_function_documentation + + +# language=python +class_with_parameters = ''' +# noinspection PyUnresolvedReferences,PyIncorrectDocstring +class C: + """Lorem ipsum. + + Dolor sit amet. + + Args: + p (int): foo. Defaults to 1. + """ + + def __init__(self): + pass +''' + +# language=python +function_with_parameters = ''' +# noinspection PyUnresolvedReferences,PyIncorrectDocstring +def f(): + """Lorem ipsum. + + Dolor sit amet. + + Args: + no_type_no_default: no type and no default. + type_no_default (int): type but no default. + with_default (int): foo. Defaults to 2. + *args (int): foo: *args + **kwargs (int): foo: **kwargs + """ + + pass +''' + +# language=python +function_with_attributes_and_parameters = ''' +# noinspection PyUnresolvedReferences,PyIncorrectDocstring +def f(): + """Lorem ipsum. + + Dolor sit amet. + + Attributes: + p (int): foo. Defaults to 2. + + Args: + q (int): foo. Defaults to 2. + + """ + + pass +''' + + +@pytest.mark.parametrize( + ("python_code", "parameter_name", "parameter_assigned_by", "expected_parameter_documentation"), + [ + ( + class_with_parameters, + "p", + ParameterAssignment.POSITION_OR_NAME, + ParameterDocstring( + type="int", + default_value="1", + description="foo. 
Defaults to 1.", + ), + ), + ( + class_with_parameters, + "missing", + ParameterAssignment.POSITION_OR_NAME, + ParameterDocstring( + type="", + default_value="", + description="", + ), + ), + ( + function_with_parameters, + "no_type_no_default", + ParameterAssignment.POSITION_OR_NAME, + ParameterDocstring( + type="", + default_value="", + description="no type and no default.", + ), + ), + ( + function_with_parameters, + "type_no_default", + ParameterAssignment.POSITION_OR_NAME, + ParameterDocstring( + type="int", + default_value="", + description="type but no default.", + ), + ), + ( + function_with_parameters, + "with_default", + ParameterAssignment.POSITION_OR_NAME, + ParameterDocstring( + type="int", + default_value="2", + description="foo. Defaults to 2.", + ), + ), + ( + function_with_parameters, + "*args", + ParameterAssignment.POSITIONAL_VARARG, + ParameterDocstring( + type="int", + default_value="", + description="foo: *args", + ), + ), + ( + function_with_parameters, + "**kwargs", + ParameterAssignment.NAMED_VARARG, + ParameterDocstring( + type="int", + default_value="", + description="foo: **kwargs", + ), + ), + ( + function_with_parameters, + "missing", + ParameterAssignment.POSITION_OR_NAME, + ParameterDocstring(type="", default_value="", description=""), + ), + ( + function_with_attributes_and_parameters, + "q", + ParameterAssignment.POSITION_OR_NAME, + ParameterDocstring( + type="int", + default_value="2", + description="foo. Defaults to 2.", + ), + ), + ( + function_with_attributes_and_parameters, + "p", + ParameterAssignment.POSITION_OR_NAME, + ParameterDocstring( + type="", + default_value="", + description="", + ), + ), + ], + ids=[ + "existing class parameter", + "missing class parameter", + "function parameter with no type and no default", + "function parameter with type and no default", + "function parameter with default", + "function parameter with positional vararg", + "function parameter with named vararg", + "missing function parameter", + "function with attributes and parameters existing parameter", + "function with attributes and parameters missing parameter", + ], +) +def test_get_parameter_documentation( + googlestyledoc_parser: GoogleDocParser, + python_code: str, + parameter_name: str, + parameter_assigned_by: ParameterAssignment, + expected_parameter_documentation: ParameterDocstring, +) -> None: + node = astroid.extract_node(python_code) + assert isinstance(node, astroid.ClassDef | astroid.FunctionDef) + + # Find the constructor + if isinstance(node, astroid.ClassDef): + for method in node.mymethods(): + if method.name == "__init__": + node = method + + assert isinstance(node, astroid.FunctionDef) + assert ( + googlestyledoc_parser.get_parameter_documentation(node, parameter_name, parameter_assigned_by) + == expected_parameter_documentation + ) + + +# language=python +class_with_attributes = ''' +# noinspection PyUnresolvedReferences,PyIncorrectDocstring +class C: + """Lorem ipsum. + + Dolor sit amet. + + Attributes: + p (int): foo. Defaults to 1. + """ + + def __init__(self): + pass +''' + +# language=python +function_with_attributes = ''' +# noinspection PyUnresolvedReferences,PyIncorrectDocstring +def f(): + """Lorem ipsum. + + Dolor sit amet. + + Attributes: + no_type_no_default: no type and no default. + type_no_default (int): type but no default. + with_default (int): foo. Defaults to 2. 
+ *args (int): foo: *args + **kwargs (int): foo: **kwargs + """ + + pass +''' + + +@pytest.mark.parametrize( + ("python_code", "attribute_name", "attribute_assigned_by", "expected_attribute_documentation"), + [ + ( + class_with_attributes, + "p", + AttributeAssignment.POSITION_OR_NAME, + AttributeDocstring( + type="int", + default_value="1", + description="foo. Defaults to 1.", + ), + ), + ( + class_with_attributes, + "missing", + AttributeAssignment.POSITION_OR_NAME, + AttributeDocstring( + type="", + default_value="", + description="", + ), + ), + ( + function_with_attributes, + "no_type_no_default", + AttributeAssignment.POSITION_OR_NAME, + AttributeDocstring( + type="", + default_value="", + description="no type and no default.", + ), + ), + ( + function_with_attributes, + "type_no_default", + AttributeAssignment.POSITION_OR_NAME, + AttributeDocstring( + type="int", + default_value="", + description="type but no default.", + ), + ), + ( + function_with_attributes, + "with_default", + AttributeAssignment.POSITION_OR_NAME, + AttributeDocstring( + type="int", + default_value="2", + description="foo. Defaults to 2.", + ), + ), + ( + function_with_attributes, + "*args", + AttributeAssignment.POSITIONAL_VARARG, + AttributeDocstring( + type="int", + default_value="", + description="foo: *args", + ), + ), + ( + function_with_attributes, + "**kwargs", + AttributeAssignment.NAMED_VARARG, + AttributeDocstring( + type="int", + default_value="", + description="foo: **kwargs", + ), + ), + ( + function_with_attributes, + "missing", + AttributeAssignment.POSITION_OR_NAME, + AttributeDocstring(type="", default_value="", description=""), + ), + ( + function_with_attributes_and_parameters, + "p", + AttributeAssignment.POSITION_OR_NAME, + AttributeDocstring( + type="int", + default_value="2", + description="foo. Defaults to 2.", + ), + ), + ( + function_with_attributes_and_parameters, + "q", + AttributeAssignment.POSITION_OR_NAME, + AttributeDocstring( + type="", + default_value="", + description="", + ), + ), + ], + ids=[ + "existing class attribute", + "missing class attribute", + "function attribute with no type and no default", + "function attribute with type and no default", + "function attribute with default", + "function attribute with positional vararg", + "function attribute with named vararg", + "missing function attribute", + "function with attributes and parameters existing attribute", + "function with attributes and parameters missing attribute", + ], +) +def test_get_attribute_documentation( + googlestyledoc_parser: GoogleDocParser, + python_code: str, + attribute_name: str, + attribute_assigned_by: AttributeAssignment, + expected_attribute_documentation: AttributeDocstring, +) -> None: + node = astroid.extract_node(python_code) + assert isinstance(node, astroid.ClassDef | astroid.FunctionDef) + + # Find the constructor + if isinstance(node, astroid.ClassDef): + for method in node.mymethods(): + if method.name == "__init__": + node = method + + assert isinstance(node, astroid.FunctionDef) + assert ( + googlestyledoc_parser.get_attribute_documentation(node, attribute_name, attribute_assigned_by) + == expected_attribute_documentation + ) + + +# language=python +function_with_return_value_and_type = ''' +# noinspection PyUnresolvedReferences,PyIncorrectDocstring +def f(): + """Lorem ipsum. + + Dolor sit amet. + + Returns: + int: this will be the return value. 
+ """ + + pass +''' + +# language=python +function_with_return_value_no_type = ''' +# noinspection PyUnresolvedReferences,PyIncorrectDocstring +def f(): + """Lorem ipsum. + + Dolor sit amet. + + Returns: + int + """ + + pass +''' + +# language=python +function_without_return_value = ''' +# noinspection PyUnresolvedReferences,PyIncorrectDocstring +def f(): + """Lorem ipsum. + + Dolor sit amet. + """ + + pass +''' + + +@pytest.mark.parametrize( + ("python_code", "expected_return_documentation"), + [ + ( + function_with_return_value_and_type, + ResultDocstring(type="int", description="this will be the return value."), + ), + ( + function_with_return_value_no_type, + ResultDocstring(type="", description="int"), + ), + (function_without_return_value, ResultDocstring(type="", description="")), + ], + ids=["existing return value and type", "existing return value no description", "function without return value"], +) +def test_get_result_documentation( + googlestyledoc_parser: GoogleDocParser, + python_code: str, + expected_return_documentation: ResultDocstring, +) -> None: + node = astroid.extract_node(python_code) + assert isinstance(node, astroid.ClassDef | astroid.FunctionDef) + + # Find the constructor + if isinstance(node, astroid.ClassDef): + for method in node.mymethods(): + if method.name == "__init__": + node = method + + assert isinstance(node, astroid.FunctionDef) + assert googlestyledoc_parser.get_result_documentation(node) == expected_return_documentation diff --git a/tests/library_analyzer/processing/api/docstring_parsing/test_numpydoc_parser.py b/tests/library_analyzer/processing/api/docstring_parsing/test_numpydoc_parser.py index 17bed98c..a2cc92ec 100644 --- a/tests/library_analyzer/processing/api/docstring_parsing/test_numpydoc_parser.py +++ b/tests/library_analyzer/processing/api/docstring_parsing/test_numpydoc_parser.py @@ -2,10 +2,10 @@ import pytest from library_analyzer.processing.api.docstring_parsing import NumpyDocParser from library_analyzer.processing.api.model import ( - ClassDocumentation, - FunctionDocumentation, + ClassDocstring, + FunctionDocstring, ParameterAssignment, - ParameterDocumentation, + ParameterDocstring, ) @@ -36,14 +36,14 @@ class C: [ ( class_with_documentation, - ClassDocumentation( + ClassDocstring( description="Lorem ipsum. Code::\n\npass\n\nDolor sit amet.", full_docstring="Lorem ipsum. Code::\n\n pass\n\nDolor sit amet.", ), ), ( class_without_documentation, - ClassDocumentation( + ClassDocstring( description="", full_docstring="", ), @@ -57,7 +57,7 @@ class C: def test_get_class_documentation( numpydoc_parser: NumpyDocParser, python_code: str, - expected_class_documentation: ClassDocumentation, + expected_class_documentation: ClassDocstring, ) -> None: node = astroid.extract_node(python_code) @@ -89,14 +89,14 @@ def f(): [ ( function_with_documentation, - FunctionDocumentation( + FunctionDocstring( description="Lorem ipsum. Code::\n\npass\n\nDolor sit amet.", full_docstring="Lorem ipsum. 
Code::\n\n pass\n\nDolor sit amet.", ), ), ( function_without_documentation, - FunctionDocumentation(description=""), + FunctionDocstring(description=""), ), ], ids=[ @@ -107,7 +107,7 @@ def f(): def test_get_function_documentation( numpydoc_parser: NumpyDocParser, python_code: str, - expected_function_documentation: FunctionDocumentation, + expected_function_documentation: FunctionDocstring, ) -> None: node = astroid.extract_node(python_code) @@ -176,7 +176,7 @@ def f(): class_with_parameters, "p", ParameterAssignment.POSITION_OR_NAME, - ParameterDocumentation( + ParameterDocstring( type="int", default_value="1", description="foo", @@ -186,7 +186,7 @@ def f(): class_with_parameters, "missing", ParameterAssignment.POSITION_OR_NAME, - ParameterDocumentation( + ParameterDocstring( type="", default_value="", description="", @@ -196,7 +196,7 @@ def f(): function_with_parameters, "no_type_no_default", ParameterAssignment.POSITION_OR_NAME, - ParameterDocumentation( + ParameterDocstring( type="", default_value="", description="foo: no_type_no_default. Code::\n\n pass", @@ -206,7 +206,7 @@ def f(): function_with_parameters, "type_no_default", ParameterAssignment.POSITION_OR_NAME, - ParameterDocumentation( + ParameterDocstring( type="int", default_value="", description="foo: type_no_default", @@ -216,7 +216,7 @@ def f(): function_with_parameters, "optional_unknown_default", ParameterAssignment.POSITION_OR_NAME, - ParameterDocumentation( + ParameterDocstring( type="int", default_value="", description="foo: optional_unknown_default", @@ -226,7 +226,7 @@ def f(): function_with_parameters, "with_default_syntax_1", ParameterAssignment.POSITION_OR_NAME, - ParameterDocumentation( + ParameterDocstring( type="int", default_value="1", description="foo: with_default_syntax_1", @@ -236,19 +236,19 @@ def f(): function_with_parameters, "with_default_syntax_2", ParameterAssignment.POSITION_OR_NAME, - ParameterDocumentation(type="int", default_value="2", description="foo: with_default_syntax_2"), + ParameterDocstring(type="int", default_value="2", description="foo: with_default_syntax_2"), ), ( function_with_parameters, "with_default_syntax_3", ParameterAssignment.POSITION_OR_NAME, - ParameterDocumentation(type="int", default_value="3", description="foo: with_default_syntax_3"), + ParameterDocstring(type="int", default_value="3", description="foo: with_default_syntax_3"), ), ( function_with_parameters, "grouped_parameter_1", ParameterAssignment.POSITION_OR_NAME, - ParameterDocumentation( + ParameterDocstring( type="int", default_value="4", description="foo: grouped_parameter_1 and grouped_parameter_2", @@ -258,7 +258,7 @@ def f(): function_with_parameters, "grouped_parameter_2", ParameterAssignment.POSITION_OR_NAME, - ParameterDocumentation( + ParameterDocstring( type="int", default_value="4", description="foo: grouped_parameter_1 and grouped_parameter_2", @@ -268,7 +268,7 @@ def f(): function_with_parameters, "args", ParameterAssignment.POSITIONAL_VARARG, - ParameterDocumentation( + ParameterDocstring( type="int", default_value="", description="foo: *args", @@ -278,7 +278,7 @@ def f(): function_with_parameters, "kwargs", ParameterAssignment.NAMED_VARARG, - ParameterDocumentation( + ParameterDocstring( type="int", default_value="", description="foo: **kwargs", @@ -288,7 +288,7 @@ def f(): function_with_parameters, "missing", ParameterAssignment.POSITION_OR_NAME, - ParameterDocumentation(type="", default_value="", description=""), + ParameterDocstring(type="", default_value="", description=""), ), ], ids=[ @@ 
-312,7 +312,7 @@ def test_get_parameter_documentation( python_code: str, parameter_name: str, parameter_assigned_by: ParameterAssignment, - expected_parameter_documentation: ParameterDocumentation, + expected_parameter_documentation: ParameterDocstring, ) -> None: node = astroid.extract_node(python_code) assert isinstance(node, astroid.ClassDef | astroid.FunctionDef) diff --git a/tests/library_analyzer/processing/api/docstring_parsing/test_plaintext_docstring_parser.py b/tests/library_analyzer/processing/api/docstring_parsing/test_plaintext_docstring_parser.py index 92fc4d14..59d7ba8e 100644 --- a/tests/library_analyzer/processing/api/docstring_parsing/test_plaintext_docstring_parser.py +++ b/tests/library_analyzer/processing/api/docstring_parsing/test_plaintext_docstring_parser.py @@ -4,10 +4,10 @@ PlaintextDocstringParser, ) from library_analyzer.processing.api.model import ( - ClassDocumentation, - FunctionDocumentation, + ClassDocstring, + FunctionDocstring, ParameterAssignment, - ParameterDocumentation, + ParameterDocstring, ) @@ -39,14 +39,14 @@ class C: [ ( class_with_documentation, - ClassDocumentation( + ClassDocstring( description="Lorem ipsum.\n\nDolor sit amet.", full_docstring="Lorem ipsum.\n\nDolor sit amet.", ), ), ( class_without_documentation, - ClassDocumentation( + ClassDocstring( description="", full_docstring="", ), @@ -60,7 +60,7 @@ class C: def test_get_class_documentation( plaintext_docstring_parser: PlaintextDocstringParser, python_code: str, - expected_class_documentation: ClassDocumentation, + expected_class_documentation: ClassDocstring, ) -> None: node = astroid.extract_node(python_code) @@ -90,14 +90,14 @@ def f(p: int): [ ( function_with_documentation, - FunctionDocumentation( + FunctionDocstring( description="Lorem ipsum.\n\nDolor sit amet.", full_docstring="Lorem ipsum.\n\nDolor sit amet.", ), ), ( function_without_documentation, - FunctionDocumentation(description=""), + FunctionDocstring(description=""), ), ], ids=[ @@ -108,7 +108,7 @@ def f(p: int): def test_get_function_documentation( plaintext_docstring_parser: PlaintextDocstringParser, python_code: str, - expected_function_documentation: FunctionDocumentation, + expected_function_documentation: FunctionDocstring, ) -> None: node = astroid.extract_node(python_code) @@ -122,7 +122,7 @@ def test_get_function_documentation( ( function_with_documentation, "p", - ParameterDocumentation( + ParameterDocstring( type="", default_value="", description="", @@ -131,7 +131,7 @@ def test_get_function_documentation( ( function_without_documentation, "p", - ParameterDocumentation( + ParameterDocstring( type="", default_value="", description="", @@ -147,7 +147,7 @@ def test_get_parameter_documentation( plaintext_docstring_parser: PlaintextDocstringParser, python_code: str, parameter_name: str, - expected_parameter_documentation: ParameterDocumentation, + expected_parameter_documentation: ParameterDocstring, ) -> None: node = astroid.extract_node(python_code) assert isinstance(node, astroid.FunctionDef) diff --git a/tests/library_analyzer/processing/api/docstring_parsing/test_restdoc_parser.py b/tests/library_analyzer/processing/api/docstring_parsing/test_restdoc_parser.py new file mode 100644 index 00000000..33b6d238 --- /dev/null +++ b/tests/library_analyzer/processing/api/docstring_parsing/test_restdoc_parser.py @@ -0,0 +1,355 @@ +# Todo Function with return value +import astroid +import pytest +from library_analyzer.processing.api.docstring_parsing import RestDocParser +from library_analyzer.processing.api.model 
import ( + ClassDocstring, + FunctionDocstring, + ParameterAssignment, + ParameterDocstring, + ResultDocstring, +) + + +@pytest.fixture() +def restdoc_parser() -> RestDocParser: + return RestDocParser() + + +class_with_documentation = ''' +class C: + """ + Lorem ipsum. Code:: + + pass + + Dolor sit amet. + """ +''' + +class_without_documentation = """ +class C: + pass +""" + + +@pytest.mark.parametrize( + ("python_code", "expected_class_documentation"), + [ + ( + class_with_documentation, + ClassDocstring( + description="Lorem ipsum. Code::\n\npass\n\nDolor sit amet.", + full_docstring="Lorem ipsum. Code::\n\n pass\n\nDolor sit amet.", + ), + ), + ( + class_without_documentation, + ClassDocstring( + description="", + full_docstring="", + ), + ), + ], + ids=[ + "class with documentation", + "class without documentation", + ], +) +def test_get_class_documentation( + restdoc_parser: RestDocParser, + python_code: str, + expected_class_documentation: ClassDocstring, +) -> None: + node = astroid.extract_node(python_code) + + assert isinstance(node, astroid.ClassDef) + assert restdoc_parser.get_class_documentation(node) == expected_class_documentation + + +# language=python +function_with_documentation = ''' +def f(): + """ + Lorem ipsum. Code:: + + pass + + Dolor sit amet. + """ + + pass +''' + +# language=python +function_without_documentation = """ +def f(): + pass +""" + + +@pytest.mark.parametrize( + ("python_code", "expected_function_documentation"), + [ + ( + function_with_documentation, + FunctionDocstring( + description="Lorem ipsum. Code::\n\npass\n\nDolor sit amet.", + full_docstring="Lorem ipsum. Code::\n\n pass\n\nDolor sit amet.", + ), + ), + ( + function_without_documentation, + FunctionDocstring( + description="", + full_docstring="", + ), + ), + ], + ids=[ + "function with documentation", + "function without documentation", + ], +) +def test_get_function_documentation( + restdoc_parser: RestDocParser, + python_code: str, + expected_function_documentation: FunctionDocstring, +) -> None: + node = astroid.extract_node(python_code) + + assert isinstance(node, astroid.FunctionDef) + assert restdoc_parser.get_function_documentation(node) == expected_function_documentation + + +# language=python +class_with_parameters = ''' +# noinspection PyUnresolvedReferences,PyIncorrectDocstring +class C: + """ + Lorem ipsum. + + Dolor sit amet. + + :param p: foo defaults to 1 + :type p: int + """ + + def __init__(self): + pass +''' + +# language=python +function_with_parameters = ''' +# noinspection PyUnresolvedReferences,PyIncorrectDocstring +def f(): + """ + Lorem ipsum. + + Dolor sit amet. 
+ + :param no_type_no_default: no type and no default + :param type_no_default: type but no default + :type type_no_default: int + :param with_default: foo that defaults to 2 + :type with_default: int + :param *args: foo: *args + :type *args: int + :param **kwargs: foo: **kwargs + :type **kwargs: int + """ + + pass +''' + + +@pytest.mark.parametrize( + ("python_code", "parameter_name", "parameter_assigned_by", "expected_parameter_documentation"), + [ + ( + class_with_parameters, + "p", + ParameterAssignment.POSITION_OR_NAME, + ParameterDocstring( + type="int", + default_value="1", + description="foo defaults to 1", + ), + ), + ( + class_with_parameters, + "missing", + ParameterAssignment.POSITION_OR_NAME, + ParameterDocstring( + type="", + default_value="", + description="", + ), + ), + ( + function_with_parameters, + "no_type_no_default", + ParameterAssignment.POSITION_OR_NAME, + ParameterDocstring( + type="", + default_value="", + description="no type and no default", + ), + ), + ( + function_with_parameters, + "type_no_default", + ParameterAssignment.POSITION_OR_NAME, + ParameterDocstring( + type="int", + default_value="", + description="type but no default", + ), + ), + ( + function_with_parameters, + "with_default", + ParameterAssignment.POSITION_OR_NAME, + ParameterDocstring( + type="int", + default_value="2", + description="foo that defaults to 2", + ), + ), + ( + function_with_parameters, + "*args", + ParameterAssignment.POSITIONAL_VARARG, + ParameterDocstring( + type="int", + default_value="", + description="foo: *args", + ), + ), + ( + function_with_parameters, + "**kwargs", + ParameterAssignment.NAMED_VARARG, + ParameterDocstring( + type="int", + default_value="", + description="foo: **kwargs", + ), + ), + ( + function_with_parameters, + "missing", + ParameterAssignment.POSITION_OR_NAME, + ParameterDocstring(type="", default_value="", description=""), + ), + ], + ids=[ + "existing class parameter", + "missing class parameter", + "function parameter with no type and no default", + "function parameter with type and no default", + "function parameter with default", + "function parameter with positional vararg", + "function parameter with named vararg", + "missing function parameter", + ], +) +def test_get_parameter_documentation( + restdoc_parser: RestDocParser, + python_code: str, + parameter_name: str, + parameter_assigned_by: ParameterAssignment, + expected_parameter_documentation: ParameterDocstring, +) -> None: + node = astroid.extract_node(python_code) + assert isinstance(node, astroid.ClassDef | astroid.FunctionDef) + + # Find the constructor + if isinstance(node, astroid.ClassDef): + for method in node.mymethods(): + if method.name == "__init__": + node = method + + assert isinstance(node, astroid.FunctionDef) + assert ( + restdoc_parser.get_parameter_documentation(node, parameter_name, parameter_assigned_by) + == expected_parameter_documentation + ) + + +# language=python +function_with_return_value_and_type = ''' +# noinspection PyUnresolvedReferences,PyIncorrectDocstring +def f(): + """ + Lorem ipsum. + + Dolor sit amet. + + :return: return value + :rtype: bool + """ + + pass +''' + +# language=python +function_with_return_value_no_type = ''' +# noinspection PyUnresolvedReferences,PyIncorrectDocstring +def f(): + """ + Lorem ipsum. + + Dolor sit amet. + + :return: return value + """ + + pass +''' + +# language=python +function_without_return_value = ''' +# noinspection PyUnresolvedReferences,PyIncorrectDocstring +def f(): + """ + Lorem ipsum. + + Dolor sit amet. 
+ """ + + pass +''' + + +@pytest.mark.parametrize( + ("python_code", "expected_return_documentation"), + [ + ( + function_with_return_value_and_type, + ResultDocstring(type="bool", description="return value"), + ), + ( + function_with_return_value_no_type, + ResultDocstring(type="", description="return value"), + ), + (function_without_return_value, ResultDocstring(type="", description="")), + ], + ids=["existing return value and type", "existing return value no type", "function without return value"], +) +def test_get_result_documentation( + restdoc_parser: RestDocParser, + python_code: str, + expected_return_documentation: ResultDocstring, +) -> None: + node = astroid.extract_node(python_code) + assert isinstance(node, astroid.ClassDef | astroid.FunctionDef) + + # Find the constructor + if isinstance(node, astroid.ClassDef): + for method in node.mymethods(): + if method.name == "__init__": + node = method + + assert isinstance(node, astroid.FunctionDef) + assert restdoc_parser.get_result_documentation(node) == expected_return_documentation diff --git a/tests/library_analyzer/processing/api/model/test_api.py b/tests/library_analyzer/processing/api/model/test_api.py index 30a582e5..e43bab7e 100644 --- a/tests/library_analyzer/processing/api/model/test_api.py +++ b/tests/library_analyzer/processing/api/model/test_api.py @@ -4,10 +4,11 @@ import pytest from library_analyzer.processing.api._ast_visitor import trim_code from library_analyzer.processing.api.model import ( + API, Class, - ClassDocumentation, + ClassDocstring, Function, - FunctionDocumentation, + FunctionDocstring, ) @@ -215,7 +216,7 @@ def test_cut_documentation_from_code(code: str, expected_code: str) -> None: superclasses=[], is_public=True, reexported_by=[], - documentation=ClassDocumentation( + docstring=ClassDocstring( "this documentation string cannot be used", ), code=code, @@ -230,7 +231,55 @@ def test_cut_documentation_from_code(code: str, expected_code: str) -> None: results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code=code, ) assert api_element.get_formatted_code(cut_documentation=True) == expected_code + "\n" + + +class TestPublicAPI: + def test_should_return_only_public_api_elements(self) -> None: + public_function = Function.from_dict( + {"id": "test/test/publicFunction", "qname": "test.publicFunction", "is_public": True}, + ) + internal_function = Function.from_dict( + {"id": "test/test/internalFunction", "qname": "test.internalFunction", "is_public": False}, + ) + public_method = Function.from_dict( + {"id": "test/test/PublicClass/publicMethod", "qname": "test.PublicClass.publicMethod", "is_public": True}, + ) + internal_method = Function.from_dict( + { + "id": "test/test/PublicClass/internalMethod", + "qname": "test.PublicClass.internalMethod", + "is_public": False, + }, + ) + public_class = Class.from_dict( + { + "id": "test/test/PublicClass", + "qname": "test.PublicClass", + "is_public": True, + "methods": [public_method.id, internal_method.id], + }, + ) + internal_class = Class.from_dict( + {"id": "test/test/InternalClass", "qname": "test.InternalClass", "is_public": False, "methods": []}, + ) + api = API( + distribution="test", + package="test", + version="1.0.0", + ) + api.add_class(public_class) + api.add_class(internal_class) + api.add_function(public_function) + api.add_function(internal_function) + api.add_function(public_method) + api.add_function(internal_method) + + public_api = api.get_public_api() + + assert 
public_api.class_count() == 1 + assert len(list(public_api.classes.values())[0].methods) == 1 + assert public_api.function_count() == 2 diff --git a/tests/library_analyzer/processing/api/model/test_documentation.py b/tests/library_analyzer/processing/api/model/test_documentation.py index 19f8b9bf..5c8e958d 100644 --- a/tests/library_analyzer/processing/api/model/test_documentation.py +++ b/tests/library_analyzer/processing/api/model/test_documentation.py @@ -1,45 +1,45 @@ import pytest from library_analyzer.processing.api.model import ( - ClassDocumentation, - FunctionDocumentation, - ParameterDocumentation, + ClassDocstring, + FunctionDocstring, + ParameterDocstring, ) @pytest.mark.parametrize( "class_documentation", [ - ClassDocumentation(), - ClassDocumentation(description="foo"), + ClassDocstring(), + ClassDocstring(description="foo"), ], ) def test_dict_conversion_for_class_documentation( - class_documentation: ClassDocumentation, + class_documentation: ClassDocstring, ) -> None: - assert ClassDocumentation.from_dict(class_documentation.to_dict()) == class_documentation + assert ClassDocstring.from_dict(class_documentation.to_dict()) == class_documentation @pytest.mark.parametrize( "function_documentation", [ - FunctionDocumentation(), - FunctionDocumentation(description="foo"), + FunctionDocstring(), + FunctionDocstring(description="foo"), ], ) def test_dict_conversion_for_function_documentation( - function_documentation: FunctionDocumentation, + function_documentation: FunctionDocstring, ) -> None: - assert FunctionDocumentation.from_dict(function_documentation.to_dict()) == function_documentation + assert FunctionDocstring.from_dict(function_documentation.to_dict()) == function_documentation @pytest.mark.parametrize( "parameter_documentation", [ - ParameterDocumentation(), - ParameterDocumentation(type="int", default_value="1", description="foo bar"), + ParameterDocstring(), + ParameterDocstring(type="int", default_value="1", description="foo bar"), ], ) def test_dict_conversion_for_parameter_documentation( - parameter_documentation: ParameterDocumentation, + parameter_documentation: ParameterDocstring, ) -> None: - assert ParameterDocumentation.from_dict(parameter_documentation.to_dict()) == parameter_documentation + assert ParameterDocstring.from_dict(parameter_documentation.to_dict()) == parameter_documentation diff --git a/tests/library_analyzer/processing/api/model/test_types.py b/tests/library_analyzer/processing/api/model/test_types.py index 759a6cba..0e6dacf4 100644 --- a/tests/library_analyzer/processing/api/model/test_types.py +++ b/tests/library_analyzer/processing/api/model/test_types.py @@ -9,7 +9,7 @@ NamedType, Parameter, ParameterAssignment, - ParameterDocumentation, + ParameterDocstring, create_type, ) @@ -79,11 +79,11 @@ ], ) def test_union_from_string(docstring_type: str, expected: dict[str, Any]) -> None: - result = create_type(ParameterDocumentation(docstring_type, "", "")) + result = create_type(ParameterDocstring(docstring_type, "", "")) if result is None: assert expected == {} else: - assert result.to_json() == expected + assert result.to_dict() == expected @pytest.mark.parametrize( @@ -101,7 +101,10 @@ def test_union_from_string(docstring_type: str, expected: dict[str, Any]) -> Non }, ), ( - "Tolerance for singular values computed by svd_solver == 'arpack'.\nMust be of range [1, infinity].\n\n.. versionadded:: 0.18.0", + ( + "Tolerance for singular values computed by svd_solver == 'arpack'.\nMust be of range [1," + " infinity].\n\n.. 
versionadded:: 0.18.0" + ), { "base_type": "float", "kind": "BoundaryType", @@ -115,11 +118,11 @@ def test_union_from_string(docstring_type: str, expected: dict[str, Any]) -> Non ], ) def test_boundary_from_string(description: str, expected: dict[str, Any]) -> None: - result = create_type(ParameterDocumentation("", "", description)) + result = create_type(ParameterDocstring("", "", description)) if result is None: assert expected == {} else: - assert result.to_json() == expected + assert result.to_dict() == expected @pytest.mark.parametrize( @@ -153,13 +156,13 @@ def test_boundary_and_union_from_string( expected: dict[str, Any], ) -> None: result = create_type( - ParameterDocumentation(type=docstring_type, default_value="", description=docstring_description), + ParameterDocstring(type=docstring_type, default_value="", description=docstring_description), ) if result is None: assert expected == {} else: - assert result.to_json() == expected + assert result.to_dict() == expected def test_correct_hash() -> None: @@ -170,7 +173,7 @@ def test_correct_hash() -> None: default_value="'test_str'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("'hashvalue'", "r", "r"), + docstring=ParameterDocstring("'hashvalue'", "r", "r"), ) assert hash(parameter) == hash(deepcopy(parameter)) enum_values = frozenset({"a", "b", "c"}) @@ -186,6 +189,7 @@ def test_correct_hash() -> None: assert NamedType("a") != NamedType("b") assert hash(NamedType("a")) != hash(NamedType("b")) attribute = Attribute( + "boundary", "boundary", BoundaryType( base_type="int", @@ -203,7 +207,10 @@ def test_correct_hash() -> None: ("string", "expected"), [ ( - "float, default=0.0 Tolerance for singular values computed by svd_solver == 'arpack'.\nMust be of range [0.0, infinity).\n\n.. versionadded:: 0.18.0", + ( + "float, default=0.0 Tolerance for singular values computed by svd_solver == 'arpack'.\nMust be of range" + " [0.0, infinity).\n\n.. versionadded:: 0.18.0" + ), BoundaryType( base_type="float", min=0, @@ -250,7 +257,10 @@ def test_correct_hash() -> None: ), ), ( - "Tolerance for singular values computed by svd_solver == 'arpack'.\nMust be of range [-2, -1].\n\n.. versionadded:: 0.18.0", + ( + "Tolerance for singular values computed by svd_solver == 'arpack'.\nMust be of range [-2, -1].\n\n.." + " versionadded:: 0.18.0" + ), BoundaryType( base_type="float", min=-2, diff --git a/tests/library_analyzer/processing/api/test_extract_boundary_values.py b/tests/library_analyzer/processing/api/test_extract_boundary_values.py new file mode 100644 index 00000000..4dfbe409 --- /dev/null +++ b/tests/library_analyzer/processing/api/test_extract_boundary_values.py @@ -0,0 +1,226 @@ +from typing import TypeAlias + +import pytest +from library_analyzer.processing.api._extract_boundary_values import extract_boundary +from library_analyzer.processing.api.model import BoundaryType + +_Numeric: TypeAlias = int | float +BoundaryValueType = tuple[str, tuple[_Numeric | str, bool], tuple[_Numeric | str, bool]] + + +# @pytest.mark.skip(reason="Currently not testting this") +@pytest.mark.parametrize( + ("type_string", "description", "expected_boundary"), + [ + ( + "float", + ( + "Damping factor in the range [0.5, 1.0) is the extent to which the current value is maintained relative" + " to incoming values (weighted 1 - damping). This in order to avoid numerical oscillations when" + " updating these values (messages)." 
+ ), + [("float", (0.5, True), (1.0, False))], + ), + ( + "float", + ( + "An upper bound on the fraction of training errors and a lower bound of the fraction of support" + " vectors. Should be in the interval (0, 1]. By default 0.5 will be taken." + ), + [("float", (0.0, False), (1.0, True))], + ), + ( + "non-negative float", + ( + "Complexity parameter used for Minimal Cost-Complexity Pruning. The subtree with the largest cost " + "complexity that is smaller than ccp_alpha will be chosen. By default, no pruning is performed. See " + ":ref:minimal_cost_complexity_pruning for details." + ), + [("float", (0.0, True), ("Infinity", False))], + ), + ( + "{'scale', 'auto'} or float", + ( + "Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.\n\nif gamma='scale' (default) is passed then it" + " uses 1 / (n_features * X.var()) as value of gamma,\nif 'auto', uses 1 / n_features\nif float, must be" + " non-negative.\n\n.. versionchanged: 0.22 The default value of gamma changed from 'auto' to 'scale'." + ), + [("float", (0.0, True), ("Infinity", False))], + ), + ( + "int", + "Degree of the polynomial kernel function ('poly'). Must be non-negative. Ignored by all other kernels.", + [("int", (0, True), ("Infinity", False))], + ), + ( + "int", + "The verbosity level. The default, zero, means silent mode. Range of values is [0, inf].", + [("int", (0, True), ("Infinity", False))], + ), + ( + "int", + "The verbosity level. The default, zero, means silent mode. Range of values is at least 3.", + [("int", (3, True), ("Infinity", False))], + ), + ( + "float", + "Momentum for gradient descent update. Should be between 0 and 1. Only used when solver='sgd'.", + [("float", (0.0, True), (1.0, True))], + ), + ( + "float between 0 and 1", + ( + "Determines the minimum steepness on the reachability plot that constitutes a cluster boundary. For " + "example, an upwards point in the reachability plot is defined by the ratio from one point to its " + "successor being at most 1-xi. Used only when cluster_method='xi'." + ), + [("float", (0.0, True), (1.0, True))], + ), + ( + "float", + "Momentum for gradient descent update. Should be non-positive. Only used when solver='sgd'.", + [("float", ("NegativeInfinity", False), (0.0, True))], + ), + ( + "float", + ( + "Regularization parameter. The strength of the regularization is inversely proportional to C. Must be " + "strictly positive." + ), + [("float", (0.0, False), ("Infinity", False))], + ), + ( + "int or float", + ( + "If bootstrap is True, the number of samples to draw from X to train each base estimator.\n\nIf None (" + "default), then draw X.shape[0] samples.\nIf int, then draw max_samples samples.\n If float, " + "then draw max_samples * X.shape[0] samples. Thus, max_samples should be in the interval (0.0, " + "1.0].\n\n.. versionadded: 0.22" + ), + [("float", (0.0, False), (1.0, True))], + ), + ( + "int or float", + ( + "If bootstrap is True, the number of samples to draw from X to train each base estimator.\n\nIf None (" + "default), then draw X.shape[0] samples.\nIf int, then max_samples values in [0, 10].\n If float, " + "then draw max_samples * X.shape[0] samples. Thus, max_samples should be in the interval (0.0, " + "1.0].\n\n.. versionadded: 0.22" + ), + [("int", (0, True), (10, True)), ("float", (0.0, False), (1.0, True))], + ), + ( + "float", + ( + "The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1. l1_ratio=0 corresponds to L2 penalty, " + "l1_ratio=1 to L1. Only used if penalty is 'elasticnet'." 
+ ), + [("float", (0.0, True), (1.0, True))], + ), + ( + "float", + ( + "The Elastic Net mixing parameter, with 0 < l1_ratio <= 1. l1_ratio=0 corresponds to L2 penalty, " + "l1_ratio=1 to L1. Only used if penalty is 'elasticnet'." + ), + [("float", (0.0, False), (1.0, True))], + ), + ( + "float", + ( + "The Elastic Net mixing parameter, with 0 <= l1_ratio < 1. l1_ratio=0 corresponds to L2 penalty, " + "l1_ratio=1 to L1. Only used if penalty is 'elasticnet'." + ), + [("float", (0.0, True), (1.0, False))], + ), + ( + "float", + ( + "The Elastic Net mixing parameter, with 0 < l1_ratio < 1. l1_ratio=0 corresponds to L2 penalty, " + "l1_ratio=1 to L1. Only used if penalty is 'elasticnet'." + ), + [("float", (0.0, False), (1.0, False))], + ), + ( + "float", + ( + "The Elastic Net mixing parameter, with 1 > l1_ratio > 0. l1_ratio=0 corresponds to L2 penalty, " + "l1_ratio=1 to L1. Only used if penalty is 'elasticnet'." + ), + [("float", (0.0, False), (1.0, False))], + ), + ( + "float", + ( + "The Elastic Net mixing parameter, with l1_ratio > 0 and < 1. l1_ratio=0 corresponds to L2 penalty, " + "l1_ratio=1 to L1. Only used if penalty is 'elasticnet'." + ), + [("float", (0.0, False), (1.0, False))], + ), + ( + "float", + ( + "The Elastic Net mixing parameter, with l1_ratio >= 0 and < 1. l1_ratio=0 corresponds to L2 penalty, " + "l1_ratio=1 to L1. Only used if penalty is 'elasticnet'." + ), + [("float", (0.0, True), (1.0, False))], + ), + ( + "int > 1 or float between 0 and 1", + ( + "Minimum number of samples in an OPTICS cluster, expressed as an absolute number or a fraction of the " + "number of samples (rounded to be at least 2). If None, the value of min_samples is used instead. Used " + "only when cluster_method='xi'." + ), + [("int", (1, False), ("Infinity", False)), ("float", (0.0, True), (1.0, True))], + ), + ("float ([0, 1])", "abc", [("float", (0.0, True), (1.0, True))]), + ("bool", "Whether to allow array.ndim > 2", []), + ( + 'dict, list of dicts, "balanced", or None', + ( + "Weights associated with classes in the form {class_label: weight}. If not given, all classes are" + " supposed to have weight one. For multi-output problems, a list of dicts can be provided in the same" + " order as the columns of y.\n\nNote that for multioutput (including multilabel) weights should be" + " defined for each class of every column in its own dict. For example, for four-class multilabel" + " classification weights should be [{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of" + ' [{1:1}, {2:5}, {3:1}, {4:1}].\n\nThe "balanced" mode uses the values of y to automatically adjust' + " weights inversely proportional to class frequencies in the input data: n_samples / (n_classes *" + " np.bincount(y)).\n\nFor multi-output, the weights of each column of y will be multiplied." + ), + [], + ), + ( + "int, RandomState instance or None", + ( + "Controls the randomness of the estimator. The features are always randomly permuted at each split," + ' even if splitter is set to "best". When max_features < n_features, the algorithm will select' + " max_features at random at each split before finding the best split among them. But the best found" + " split may vary across different runs, even if max_features=n_features. That is the case, if the" + " improvement of the criterion is identical for several splits and one split has to be selected at" + " random. To obtain a deterministic behaviour during fitting, random_state has to be fixed to an" + " integer. See :term:Glossary for details." 
+ ), + [], + ), + ( + "{'ovo', 'ovr'}", + ( + "Whether to return a one-vs-rest ('ovr') decision function of shape (n_samples, n_classes) as all other" + " classifiers, or the original one-vs-one ('ovo') decision function of libsvm which has shape" + " (n_samples, n_classes * (n_classes - 1) / 2). However, note that internally, one-vs-one ('ovo') is" + " always used as a multi-class strategy to train models; an ovr matrix is only constructed from the ovo" + " matrix. The parameter is ignored for binary classification.\n\n.. versionchanged: 0.19" + " decision_function_shape is 'ovr' by default.\n\n.. versionadded: 0.17 decision_function_shape='ovr'" + " is recommended.\n\n.. versionchanged: 0.17 Deprecated decision_function_shape='ovo' and None." + ), + [], + ), + ], +) +def test_extract_boundaries(type_string: str, description: str, expected_boundary: list[BoundaryValueType]) -> None: + expected = [ + BoundaryType(base_type=type_, min=min_[0], max=max_[0], min_inclusive=min_[1], max_inclusive=max_[1]) + for type_, min_, max_ in expected_boundary + ] + assert extract_boundary(description, type_string) == set(expected) diff --git a/tests/library_analyzer/processing/api/test_extract_valid_literals.py b/tests/library_analyzer/processing/api/test_extract_valid_literals.py index 733f759b..bd961975 100644 --- a/tests/library_analyzer/processing/api/test_extract_valid_literals.py +++ b/tests/library_analyzer/processing/api/test_extract_valid_literals.py @@ -7,43 +7,85 @@ [ ( "str", - 'If "mean", then replace missing values using the mean along each column\nIf "median", then replace missing values using the median along each column\nIf "most_frequent", then replace missing using the most frequent value along each column\nIf "constant", then replace missing values with fill_value\n', + ( + 'If "mean", then replace missing values using the mean along each column ' + 'If "median", then replace missing values using the median along each column. ' + 'If "most_frequent", then replace missing using the most frequent value along each column. ' + 'If "constant", then replace missing values with fill_value.' + ), ['"mean"', '"median"', '"most_frequent"', '"constant"'], ), + ("str or bool", "Valid values are [False, None, 'allow-nan']", ['"True"', '"False"', '"None"', '"allow-nan"']), ( "str", - "If 'mean', then replace missing values using the mean along each column\nIf 'median', then replace missing values using the median along each column\nIf 'most_frequent', then replace missing using the most frequent value along each column\nIf 'constant', then replace missing values with fill_value\n", - ["'median'", "'most_frequent'", "'constant'", "'mean'"], + ( + "If 'mean', then replace missing values using the mean along each column." + "If 'median', then replace " + "missing values using the median along each column. If 'most_frequent', then replace missing using the " + "most frequent value along each column. If 'constant', then replace missing values with fill_value." 
+ ), + ['"median"', '"most_frequent"', '"constant"', '"mean"'], ), ( "str, list or tuple of str", - 'Attribute name(s) given as string or a list/tuple of strings Eg.: ["coef_", "estimator_", ...], "coef_"\n\nIf None, estimator is considered fitted if there exist an attribute that ends with a underscore and does not start with double underscore.', + ( + 'Attribute name(s) given as string or a list/tuple of strings Eg.: ["coef_", "estimator_", ...],' + ' "coef_" If None, estimator is considered fitted if there exist an attribute that ends with a' + " underscore and does not start with double underscore." + ), ["None", "unlistable_str"], ), ( "bool or 'allow-nan'", - "Whether to raise an error on np.inf, np.nan, pd.NA in X. This parameter does not influence whether y can have np.inf, np.nan, pd.NA values. The possibilities are:\n\n\tTrue: Force all values of X to be finite.\n\tFalse: accepts np.inf, np.nan, pd.NA in X.\n\t'allow-nan': accepts only np.nan or pd.NA values in X. Values cannot be infinite.\n\n.. versionadded: 0.20 force_all_finite accepts the string 'allow-nan'.\n\n.. versionchanged: 0.23 Accepts pd.NA and converts it into np.nan", - ["'allow-nan'", "False", "True"], + ( + "Whether to raise an error on np.inf, np.nan, pd.NA in X. This parameter does not influence whether y" + " can have np.inf, np.nan, pd.NA values. The possibilities are: \n\n\tTrue: Force all values of X to be" + " finite. \n\tFalse: accepts np.inf, np.nan, pd.NA in X. \n\t'allow-nan': accepts only np.nan or pd.NA" + " values in X. Values cannot be infinite. \n\n.. versionadded: 0.20 force_all_finite accepts the string" + " 'allow-nan'. \n\n.. versionchanged: 0.23 Accepts pd.NA and converts it into np.nan" + ), + ['"allow-nan"', "False", "True"], ), ( '{"random", "best"}', - 'The strategy used to choose the split at each node. Supported strategies are "best" to choose the best split and "random" to choose the best random split.', + ( + 'The strategy used to choose the split at each node. Supported strategies are "best" to choose the best' + ' split and "random" to choose the best random split.' + ), ['"best"', '"random"'], ), ( "bool or str", - "When set to True, change the display of 'values' and/or 'samples' to be proportions and percentages respectively.", + ( + "When set to True, change the display of 'values' and/or 'samples' to be proportions and percentages " + "respectively." + ), ["False", "True", "unlistable_str"], ), ( "int, RandomState instance or None", - 'Controls the randomness of the estimator. The features are always randomly permuted at each split, even if splitter is set to "best". When max_features < n_features, the algorithm will select max_features at random at each split before finding the best split among them. But the best found split may vary across different runs, even if max_features=n_features. That is the case, if the improvement of the criterion is identical for several splits and one split has to be selected at random. To obtain a deterministic behaviour during fitting, random_state has to be fixed to an integer. See :term:Glossary for details.', + ( + "Controls the randomness of the estimator. The features are always randomly permuted at each split," + ' even if splitter is set to "best". When max_features < n_features, the algorithm will select' + " max_features at random at each split before finding the best split among them. But the best found" + " split may vary across different runs, even if max_features=n_features. 
That is the case, if the" + " improvement of the criterion is identical for several splits and one split has to be selected at" + " random. To obtain a deterministic behaviour during fitting, random_state has to be fixed to an" + " integer. See :term:Glossary for details." + ), [], ), ("float", "Independent term in kernel function. It is only significant in 'poly' and 'sigmoid'.", []), ( "float", - 'When self.fit_intercept is True, instance vector x becomes [x, self.intercept_scaling], i.e. a "synthetic" feature with constant value equals to intercept_scaling is appended to the instance vector. The intercept becomes intercept_scaling * synthetic feature weight Note! the synthetic feature weight is subject to l1/l2 regularization as all other features. To lessen the effect of regularization on synthetic feature weight (and therefore on the intercept) intercept_scaling has to be increased.', + ( + "When self.fit_intercept is True, instance vector x becomes [x, self.intercept_scaling], i.e. a" + ' "synthetic" feature with constant value equals to intercept_scaling is appended to the instance' + " vector. The intercept becomes intercept_scaling * synthetic feature weight Note! the synthetic" + " feature weight is subject to l1/l2 regularization as all other features. To lessen the effect of" + " regularization on synthetic feature weight (and therefore on the intercept) intercept_scaling has to" + " be increased." + ), [], ), ], diff --git a/tests/library_analyzer/processing/api/test_get_parameter_list.py b/tests/library_analyzer/processing/api/test_get_parameter_list.py index e77b2716..ef0c01d7 100644 --- a/tests/library_analyzer/processing/api/test_get_parameter_list.py +++ b/tests/library_analyzer/processing/api/test_get_parameter_list.py @@ -7,7 +7,7 @@ from library_analyzer.processing.api.model import ( Parameter, ParameterAssignment, - ParameterDocumentation, + ParameterDocstring, ) global_function_empty_parameter_list = """ @@ -45,7 +45,7 @@ def f(**kwargs): default_value=None, assigned_by=ParameterAssignment.POSITION_ONLY, is_public=True, - documentation=ParameterDocumentation(), + docstring=ParameterDocstring(), ), Parameter( id_="f/position_or_name", @@ -54,7 +54,7 @@ def f(**kwargs): default_value=None, assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation(), + docstring=ParameterDocstring(), ), Parameter( id_="f/name_only", @@ -63,7 +63,7 @@ def f(**kwargs): default_value="0", assigned_by=ParameterAssignment.NAME_ONLY, is_public=True, - documentation=ParameterDocumentation(), + docstring=ParameterDocstring(), ), ], ), @@ -77,7 +77,7 @@ def f(**kwargs): default_value=None, assigned_by=ParameterAssignment.POSITIONAL_VARARG, is_public=True, - documentation=ParameterDocumentation(), + docstring=ParameterDocstring(), ), Parameter( id_="f/name_only", @@ -86,7 +86,7 @@ def f(**kwargs): default_value="0", assigned_by=ParameterAssignment.NAME_ONLY, is_public=True, - documentation=ParameterDocumentation(), + docstring=ParameterDocstring(), ), ], ), @@ -100,7 +100,7 @@ def f(**kwargs): default_value=None, assigned_by=ParameterAssignment.NAMED_VARARG, is_public=True, - documentation=ParameterDocumentation(), + docstring=ParameterDocstring(), ), ], ), @@ -117,7 +117,7 @@ def test_get_parameter_list_on_global_functions(python_code: str, expected_param assert isinstance(node, astroid.FunctionDef) actual_parameter_list = [ - it.to_json() + it.to_dict() for it in get_parameter_list( docstring_parser=PlaintextDocstringParser(), function_node=node, 
@@ -127,7 +127,7 @@ def test_get_parameter_list_on_global_functions(python_code: str, expected_param ) ] - expected_parameter_list = [it.to_json() for it in expected_parameter_list] + expected_parameter_list = [it.to_dict() for it in expected_parameter_list] assert actual_parameter_list == expected_parameter_list @@ -172,7 +172,7 @@ def f(*self): default_value=None, assigned_by=ParameterAssignment.IMPLICIT, is_public=True, - documentation=ParameterDocumentation(), + docstring=ParameterDocstring(), ), Parameter( id_="C/f/p", @@ -181,7 +181,7 @@ def f(*self): default_value=None, assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation(), + docstring=ParameterDocstring(), ), ], ), @@ -195,7 +195,7 @@ def f(*self): default_value=None, assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation(), + docstring=ParameterDocstring(), ), ], ), @@ -209,7 +209,7 @@ def f(*self): default_value=None, assigned_by=ParameterAssignment.IMPLICIT, is_public=True, - documentation=ParameterDocumentation(), + docstring=ParameterDocstring(), ), Parameter( id_="C/f/p", @@ -218,7 +218,7 @@ def f(*self): default_value=None, assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation(), + docstring=ParameterDocstring(), ), ], ), @@ -232,7 +232,7 @@ def f(*self): default_value=None, assigned_by=ParameterAssignment.POSITIONAL_VARARG, is_public=True, - documentation=ParameterDocumentation(), + docstring=ParameterDocstring(), ), ], ), @@ -255,7 +255,7 @@ def test_get_parameter_list_on_method(python_code: str, expected_parameter_list: assert isinstance(node, astroid.FunctionDef) actual_parameter_list = [ - it.to_json() + it.to_dict() for it in get_parameter_list( docstring_parser=PlaintextDocstringParser(), function_node=node, @@ -265,6 +265,6 @@ def test_get_parameter_list_on_method(python_code: str, expected_parameter_list: ) ] - expected_parameter_list = [it.to_json() for it in expected_parameter_list] + expected_parameter_list = [it.to_dict() for it in expected_parameter_list] assert actual_parameter_list == expected_parameter_list diff --git a/tests/library_analyzer/processing/api/test_instance_attributes.py b/tests/library_analyzer/processing/api/test_instance_attributes.py index c0198252..633866e4 100644 --- a/tests/library_analyzer/processing/api/test_instance_attributes.py +++ b/tests/library_analyzer/processing/api/test_instance_attributes.py @@ -20,7 +20,7 @@ class TestClass: """, ), - [Attribute("string_value", NamedType("str"))], + [Attribute("test/string_value", "string_value", NamedType("str"))], ), ( inspect.cleandoc( @@ -50,8 +50,8 @@ class TestClass3: """, ), [ - Attribute("other_class", NamedType("object")), - Attribute("int_value", NamedType("int")), + Attribute("test/other_class", "other_class", NamedType("object")), + Attribute("test/int_value", "int_value", NamedType("int")), ], ), ( @@ -64,8 +64,8 @@ def __init__(self, int_value: int = 5) -> None: """, ), [ - Attribute("int_value", NamedType("int")), - Attribute("bool_value", NamedType("bool")), + Attribute("test/int_value", "int_value", NamedType("int")), + Attribute("test/bool_value", "bool_value", NamedType("bool")), ], ), ], @@ -74,4 +74,4 @@ def test_instance_attributes(class_code: str, expected_attributes: list[Attribut module = astroid.parse(class_code) classes = [class_ for class_ in module.body if isinstance(class_, astroid.ClassDef)] assert len(classes) == 1 - assert get_instance_attributes(classes[0]) 
== expected_attributes + assert get_instance_attributes(classes[0], "test") == expected_attributes diff --git a/tests/library_analyzer/processing/api/test_resolve_references.py b/tests/library_analyzer/processing/api/test_resolve_references.py new file mode 100644 index 00000000..e62109c4 --- /dev/null +++ b/tests/library_analyzer/processing/api/test_resolve_references.py @@ -0,0 +1,629 @@ +from __future__ import annotations + +from dataclasses import dataclass + +import astroid +import pytest +from library_analyzer.processing.api import ( + ClassScopeNode, + MemberAccess, + ScopeNode, + get_scope, +) + + +@dataclass +class SimpleScope: + node_name: str + children: list[SimpleScope] + + +@dataclass +class SimpleClassScope(SimpleScope): + class_variables: list[str] + instance_variables: list[str] + + +def transform_member_access(member_access: MemberAccess) -> str: + attribute_names = [] + + while isinstance(member_access, MemberAccess): + attribute_names.append(member_access.value.name) + member_access = member_access.expression + if isinstance(member_access, astroid.Name): + attribute_names.append(member_access.name) + + return ".".join(reversed(attribute_names)) + + +@pytest.mark.parametrize( + ("code", "expected"), + [ + ( + """ + glob = 1 + class A: + def __init__(self): + self.value = 10 + self.test = 20 + def f(): + var1 = 1 + def g(): + var2 = 2 + """, + [ + SimpleScope( + "Module", + [ + SimpleScope("AssignName.glob", []), + SimpleClassScope( + "ClassDef.A", + [ + SimpleScope( + "FunctionDef.__init__", + [ + SimpleScope("AssignAttr.value", []), + SimpleScope("AssignAttr.test", []), + ], + ), + SimpleScope( + "FunctionDef.f", + [SimpleScope("AssignName.var1", [])], + ), + ], + [], + ["value", "test"], + ), + SimpleScope("FunctionDef.g", [SimpleScope("AssignName.var2", [])]), + ], + ), + ], + ), + ( + """ + def function_scope(): + res = 23 + return res + """, + [ + SimpleScope( + "Module", + [ + SimpleScope( + "FunctionDef.function_scope", + [SimpleScope("AssignName.res", [])], + ), + ], + ), + ], + ), + ( + """ + var1 = 10 + def function_scope(): + res = var1 + return res + """, + [ + SimpleScope( + "Module", + [ + SimpleScope("AssignName.var1", []), + SimpleScope( + "FunctionDef.function_scope", + [SimpleScope("AssignName.res", [])], + ), + ], + ), + ], + ), + ( + """ + var1 = 10 + def function_scope(): + global var1 + res = var1 + return res + """, + [ + SimpleScope( + "Module", + [ + SimpleScope("AssignName.var1", []), + SimpleScope( + "FunctionDef.function_scope", + [SimpleScope("AssignName.res", [])], + ), + ], + ), + ], + ), + ( + """ + def function_scope(parameter): + res = parameter + return res + """, + [ + SimpleScope( + "Module", + [ + SimpleScope( + "FunctionDef.function_scope", + [ + SimpleScope("AssignName.parameter", []), + SimpleScope("AssignName.res", []), + ], + ), + ], + ), + ], + ), + ( + """ + class A: + class_attr1 = 20 + + def local_class_attr(): + var1 = A.class_attr1 + return var1 + """, + [ + SimpleScope( + "Module", + [ + SimpleClassScope( + "ClassDef.A", + [ + SimpleScope("AssignName.class_attr1", []), + SimpleScope( + "FunctionDef.local_class_attr", + [SimpleScope("AssignName.var1", [])], + ), + ], + ["class_attr1"], + [], + ), + ], + ), + ], + ), + ( + """ + class B: + local_class_attr1 = 20 + local_class_attr2 = 30 + + def __init__(self): + self.instance_attr1 = 10 + + def local_instance_attr(): + var1 = self.instance_attr1 + return var1 + """, + [ + SimpleScope( + "Module", + [ + SimpleClassScope( + "ClassDef.B", + [ + 
SimpleScope("AssignName.local_class_attr1", []), + SimpleScope("AssignName.local_class_attr2", []), + SimpleScope( + "FunctionDef.__init__", + [SimpleScope("AssignAttr.instance_attr1", [])], + ), + SimpleScope( + "FunctionDef.local_instance_attr", + [SimpleScope("AssignName.var1", [])], + ), + ], + ["local_class_attr1", "local_class_attr2"], + ["instance_attr1"], + ), + ], + ), + ], + ), + ( + """ + class B: + def __init__(self): + self.instance_attr1 = 10 + + def local_instance_attr(): + var1 = B().instance_attr1 + return var1 + """, + [ + SimpleScope( + "Module", + [ + SimpleClassScope( + "ClassDef.B", + [ + SimpleScope( + "FunctionDef.__init__", + [SimpleScope("AssignAttr.instance_attr1", [])], + ), + ], + [], + ["instance_attr1"], + ), + SimpleScope( + "FunctionDef.local_instance_attr", + [SimpleScope("AssignName.var1", [])], + ), + ], + ), + ], + ), + ( + """ + class A: + var1 = 10 + + class B: + var2 = 20 + """, + [ + SimpleScope( + "Module", + [ + SimpleClassScope( + "ClassDef.A", + [ + SimpleScope("AssignName.var1", []), + SimpleClassScope( + "ClassDef.B", + [SimpleScope("AssignName.var2", [])], + ["var2"], + [], + ), + ], + ["var1"], + [], + ), + ], + ), + ], + ), + ( + """ + def function_scope(): + var1 = 10 + + class B: + var2 = 20 + """, + [ + SimpleScope( + "Module", + [ + SimpleScope( + "FunctionDef.function_scope", + [ + SimpleScope("AssignName.var1", []), + SimpleClassScope( + "ClassDef.B", + [SimpleScope("AssignName.var2", [])], + ["var2"], + [], + ), + ], + ), + ], + ), + ], + ), + ( + """ + def function_scope(): + var1 = 10 + + def local_function_scope(): + var2 = 20 + """, + [ + SimpleScope( + "Module", + [ + SimpleScope( + "FunctionDef.function_scope", + [ + SimpleScope("AssignName.var1", []), + SimpleScope( + "FunctionDef.local_function_scope", + [SimpleScope("AssignName.var2", [])], + ), + ], + ), + ], + ), + ], + ), + ( + """ + import math + + class A: + value = math.pi + """, + [ + SimpleScope( + "Module", + [ + SimpleScope("Import.math", []), + SimpleClassScope( + "ClassDef.A", + [SimpleScope("AssignName.value", [])], + ["value"], + [], + ), + ], + ), + ], + ), + ( + """ + from math import pi + + class B: + value = pi + """, + [ + SimpleScope( + "Module", + [ + SimpleScope("ImportFrom.math.pi", []), + SimpleClassScope("ClassDef.B", [SimpleScope("AssignName.value", [])], ["value"], []), + ], + ), + ], + ), + ( + """ + def function_scope(): + var1 = 10 + + def local_function_scope(): + var2 = 20 + + class local_class_scope: + var3 = 30 + + def local_class_function_scope(): + var4 = 40 + """, + [ + SimpleScope( + "Module", + [ + SimpleScope( + "FunctionDef.function_scope", + [ + SimpleScope("AssignName.var1", []), + SimpleScope( + "FunctionDef.local_function_scope", + [ + SimpleScope("AssignName.var2", []), + SimpleClassScope( + "ClassDef.local_class_scope", + [ + SimpleScope("AssignName.var3", []), + SimpleScope( + "FunctionDef.local_class_function_scope", + [ + SimpleScope( + "AssignName.var4", + [], + ), + ], + ), + ], + ["var3"], + [], + ), + ], + ), + ], + ), + ], + ), + ], + ), + ( + """ + from collections.abc import Callable + from typing import Any + + import astroid + + _EnterAndLeaveFunctions = tuple[ + Callable[[astroid.NodeNG], None] | None, + Callable[[astroid.NodeNG], None] | None, + ] + + + class ASTWalker: + additional_locals = [] + + def __init__(self, handler: Any) -> None: + self._handler = handler + self._cache: dict[type, _EnterAndLeaveFunctions] = {} + + def walk(self, node: astroid.NodeNG) -> None: + self.__walk(node, set()) + + def 
__walk(self, node: astroid.NodeNG, visited_nodes: set[astroid.NodeNG]) -> None: + if node in visited_nodes: + raise AssertionError("Node visited twice") + visited_nodes.add(node) + + self.__enter(node) + for child_node in node.get_children(): + self.__walk(child_node, visited_nodes) + self.__leave(node) + + def __enter(self, node: astroid.NodeNG) -> None: + method = self.__get_callbacks(node)[0] + if method is not None: + method(node) + + def __leave(self, node: astroid.NodeNG) -> None: + method = self.__get_callbacks(node)[1] + if method is not None: + method(node) + + def __get_callbacks(self, node: astroid.NodeNG) -> _EnterAndLeaveFunctions: + klass = node.__class__ + methods = self._cache.get(klass) + + if methods is None: + handler = self._handler + class_name = klass.__name__.lower() + enter_method = getattr(handler, f"enter_{class_name}", getattr(handler, "enter_default", None)) + leave_method = getattr(handler, f"leave_{class_name}", getattr(handler, "leave_default", None)) + self._cache[klass] = (enter_method, leave_method) + else: + enter_method, leave_method = methods + + return enter_method, leave_method + + """, + [ + SimpleScope( + "Module", + [ + SimpleScope("ImportFrom.collections.abc.Callable", []), + SimpleScope("ImportFrom.typing.Any", []), + SimpleScope("Import.astroid", []), + SimpleScope("AssignName._EnterAndLeaveFunctions", []), + SimpleClassScope( + "ClassDef.ASTWalker", + [ + SimpleScope("AssignName.additional_locals", []), + SimpleScope( + "FunctionDef.__init__", + [ + SimpleScope("AssignName.handler", []), + SimpleScope("AssignAttr._handler", []), + SimpleScope("AssignAttr._cache", []), + ], + ), + SimpleScope( + "FunctionDef.walk", + [ + SimpleScope("AssignName.node", []), + ], + ), + SimpleScope( + "FunctionDef.__walk", + [ + SimpleScope("AssignName.node", []), + SimpleScope("AssignName.visited_nodes", []), + ], + ), + SimpleScope( + "FunctionDef.__enter", + [ + SimpleScope("AssignName.node", []), + SimpleScope("AssignName.method", []), + ], + ), + SimpleScope( + "FunctionDef.__leave", + [ + SimpleScope("AssignName.node", []), + SimpleScope("AssignName.method", []), + ], + ), + SimpleScope( + "FunctionDef.__get_callbacks", + [ + SimpleScope("AssignName.node", []), + SimpleScope("AssignName.klass", []), + SimpleScope("AssignName.methods", []), + SimpleScope("AssignName.handler", []), + SimpleScope("AssignName.class_name", []), + SimpleScope("AssignName.enter_method", []), + SimpleScope("AssignName.leave_method", []), + ], + ), + ], + ["additional_locals"], + ["_handler", "_cache"], + ), + ], + ), + ], + ), + ], + ids=[ + "Seminar Example", + "Function Scope", + "Function Scope with variable", + "Function Scope with global variable", + "Function Scope with Parameter", + "Class Scope with class attribute and Class function", + "Class Scope with instance attribute and Class function", + "Class Scope with instance attribute and Modul function", + "Class Scope within Class Scope", + "Class Scope within Function Scope", + "Function Scope within Function Scope", + "Import Scope", + "Import From Scope", + "Complex Scope", + "ASTWalker", + ], +) +def test_get_scope(code: str, expected: list[SimpleScope | SimpleClassScope]) -> None: + result = get_scope(code) + assert_test_get_scope(result, expected) + + +def assert_test_get_scope(result: list[ScopeNode], expected: list[SimpleScope | SimpleClassScope]) -> None: + transformed_result = [ + transform_result(node) for node in result + ] # The result and the expected data is simplified to make the comparison easier + assert 
transformed_result == expected + + +def transform_result(node: ScopeNode | ClassScopeNode) -> SimpleScope | SimpleClassScope: + if node.children is not None: + if isinstance(node, ClassScopeNode): + return SimpleClassScope( + to_string(node.node), + [transform_result(child) for child in node.children], + [to_string_class(child) for child in node.class_variables], + [to_string_class(child) for child in node.instance_variables], + ) + return SimpleScope(to_string(node.node), [transform_result(child) for child in node.children]) + else: + return SimpleScope(to_string(node.node), []) + + +def to_string(node: astroid.NodeNG) -> str: + if isinstance(node, astroid.Module): + return "Module" + elif isinstance(node, astroid.ClassDef | astroid.FunctionDef | astroid.AssignName): + return f"{node.__class__.__name__}.{node.name}" + elif isinstance(node, astroid.AssignAttr): + return f"{node.__class__.__name__}.{node.attrname}" + elif isinstance(node, MemberAccess): + result = transform_member_access(node) + return f"MemberAccess.{result}" + elif isinstance(node, astroid.Import): + return f"{node.__class__.__name__}.{node.names[0][0]}" + elif isinstance(node, astroid.ImportFrom): + return f"{node.__class__.__name__}.{node.modname}.{node.names[0][0]}" + raise NotImplementedError(f"Unknown node type: {node.__class__.__name__}") + + +def to_string_class(node: astroid.NodeNG) -> str: + if isinstance(node, astroid.AssignAttr): + return f"{node.attrname}" + elif isinstance(node, astroid.AssignName): + return f"{node.name}" + raise NotImplementedError(f"Unknown node type: {node.__class__.__name__}") diff --git a/tests/library_analyzer/processing/dependencies/test_get_dependency.py b/tests/library_analyzer/processing/dependencies/test_get_dependency.py index 1544f76f..29ebc6ca 100644 --- a/tests/library_analyzer/processing/dependencies/test_get_dependency.py +++ b/tests/library_analyzer/processing/dependencies/test_get_dependency.py @@ -1,9 +1,7 @@ -import spacy -import spacy.cli from library_analyzer.processing.api.model import ( Parameter, ParameterAssignment, - ParameterDocumentation, + ParameterDocstring, ) from library_analyzer.processing.dependencies import ( Action, @@ -18,12 +16,9 @@ extract_condition, extract_lefts_and_rights, ) +from library_analyzer.utils import load_language -try: - nlp = spacy.load("en_core_web_sm") -except OSError: - spacy.cli.download("en_core_web_sm") - nlp = spacy.load("en_core_web_sm") +nlp = load_language("en_core_web_sm") def test_extract_lefts_and_rights() -> None: @@ -43,7 +38,10 @@ def test_extract_action() -> None: assert ignored_action == ParameterIsIgnored(action="this parameter is ignored") action_is_illegal = nlp( - "Individual weights for each sample raises error if sample_weight is passed and base_estimator fit method does not support it. ", + ( + "Individual weights for each sample raises error if sample_weight is passed and base_estimator fit method" + " does not support it. 
" + ), ) action_is_illegal_action_token = action_is_illegal[5] action_is_illegal_condition_token = action_is_illegal[10] @@ -90,7 +88,7 @@ def test_extract_dependencies_from_docstring_pattern_adverbial_clause() -> None: default_value=None, assigned_by=ParameterAssignment.NAME_ONLY, is_public=True, - documentation=ParameterDocumentation( + docstring=ParameterDocstring( type="param possible types", default_value="", description=param_docstring_nlp.text, @@ -103,7 +101,7 @@ def test_extract_dependencies_from_docstring_pattern_adverbial_clause() -> None: default_value=None, assigned_by=ParameterAssignment.NAME_ONLY, is_public=True, - documentation=ParameterDocumentation( + docstring=ParameterDocstring( type="param possible types", default_value="", description="param probability docstring", diff --git a/tests/library_analyzer/processing/migration/model/test_differ.py b/tests/library_analyzer/processing/migration/model/test_differ.py index f4ce1c93..e703e2bc 100644 --- a/tests/library_analyzer/processing/migration/model/test_differ.py +++ b/tests/library_analyzer/processing/migration/model/test_differ.py @@ -5,13 +5,13 @@ API, Attribute, Class, - ClassDocumentation, + ClassDocstring, Function, - FunctionDocumentation, + FunctionDocstring, NamedType, Parameter, ParameterAssignment, - ParameterDocumentation, + ParameterDocstring, Result, ResultDocstring, UnionType, @@ -41,14 +41,14 @@ differ_list, ) def test_attribute_similarity(differ: AbstractDiffer) -> None: - attribute_a = Attribute("test_string", NamedType("str")) + attribute_a = Attribute("test_string", "test_string", NamedType("str")) assert differ.compute_attribute_similarity(attribute_a, attribute_a) == 1 - attribute_b = Attribute("new_test_string", NamedType("str")) + attribute_b = Attribute("new_test_string", "new_test_string", NamedType("str")) assert differ.compute_attribute_similarity(attribute_a, attribute_b) >= 0.5 - attribute_a = Attribute("value", UnionType([NamedType("str"), NamedType("int")])) - attribute_b = Attribute("value", UnionType([NamedType("str"), NamedType("bool")])) + attribute_a = Attribute("value", "value", UnionType([NamedType("str"), NamedType("int")])) + attribute_b = Attribute("value", "value", UnionType([NamedType("str"), NamedType("bool")])) assert differ.compute_attribute_similarity(attribute_a, attribute_b) >= 0.5 @@ -69,7 +69,7 @@ class Test: superclasses=[], is_public=True, reexported_by=[], - documentation=ClassDocumentation("This is a test"), + docstring=ClassDocstring("This is a test"), code=code_a, instance_attributes=[], ) @@ -87,7 +87,7 @@ class newTest: superclasses=[], is_public=True, reexported_by=[], - documentation=ClassDocumentation("This is a new test"), + docstring=ClassDocstring("This is a new test"), code=code_b, instance_attributes=[], ) @@ -107,7 +107,7 @@ def test_function_similarity(differ: AbstractDiffer) -> None: default_value="'test_str'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("'test_str'", "", ""), + docstring=ParameterDocstring("'test_str'", "", ""), ), ] results: list[Result] = [] @@ -128,7 +128,7 @@ def test(test_parameter: str): results=results, is_public=True, reexported_by=[], - documentation=FunctionDocumentation( + docstring=FunctionDocstring( "This test function is a proof of work", ), code=code_a, @@ -152,7 +152,7 @@ def test_method(test_parameter: str): default_value="'test_str'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("'test_str'", "", 
""), + docstring=ParameterDocstring("'test_str'", "", ""), ), ] function_b = Function( @@ -163,7 +163,7 @@ def test_method(test_parameter: str): results=results, is_public=True, reexported_by=[], - documentation=FunctionDocumentation( + docstring=FunctionDocstring( "This test function is a proof of concept.", ), code=code_b, @@ -183,7 +183,7 @@ def test_parameter_similarity(differ: AbstractDiffer) -> None: default_value="'str'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("'str'", "", ""), + docstring=ParameterDocstring("'str'", "", ""), ) parameter_b = Parameter( id_="test/test.Test/test_method/test_parameter", @@ -192,7 +192,7 @@ def test_parameter_similarity(differ: AbstractDiffer) -> None: default_value="5", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("int", "", ""), + docstring=ParameterDocstring("int", "", ""), ) assert 0.45 < differ.compute_parameter_similarity(parameter_a, parameter_b) < 0.7 @@ -203,7 +203,7 @@ def test_parameter_similarity(differ: AbstractDiffer) -> None: default_value="9", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("int", "", ""), + docstring=ParameterDocstring("int", "", ""), ) assert 0.75 < differ.compute_parameter_similarity(parameter_a, parameter_b) < 0.9 @@ -213,10 +213,11 @@ def test_parameter_similarity(differ: AbstractDiffer) -> None: differ_list, ) def test_result_similarity(differ: AbstractDiffer) -> None: - result_a = Result("config", ResultDocstring("dict", "")) + result_a = Result("config", "config", ResultDocstring("dict", "")) assert differ.compute_result_similarity(result_a, result_a) == 1 result_b = Result( + "new_config", "new_config", ResultDocstring("dict", "A dictionary that includes the new configuration"), ) diff --git a/tests/library_analyzer/processing/migration/model/test_inheritance_differ.py b/tests/library_analyzer/processing/migration/model/test_inheritance_differ.py index a85b2b93..17fca084 100644 --- a/tests/library_analyzer/processing/migration/model/test_inheritance_differ.py +++ b/tests/library_analyzer/processing/migration/model/test_inheritance_differ.py @@ -5,13 +5,13 @@ API, Attribute, Class, - ClassDocumentation, + ClassDocstring, Function, - FunctionDocumentation, + FunctionDocstring, NamedType, Parameter, ParameterAssignment, - ParameterDocumentation, + ParameterDocstring, Result, ResultDocstring, ) @@ -34,7 +34,12 @@ class SuperTest: pass""", ) class_id_super = "test/test/SuperTest" - attribute_super = Attribute("new_test_int", NamedType("int"), class_id=class_id_super) + attribute_super = Attribute( + "test/test/SuperTest/new_test_int", + "new_test_int", + NamedType("int"), + class_id=class_id_super, + ) class_super = Class( id=class_id_super, qname="test.SuperTest", @@ -42,7 +47,7 @@ class SuperTest: superclasses=[], is_public=True, reexported_by=[], - documentation=ClassDocumentation("This is a test"), + docstring=ClassDocstring("This is a test"), code=code_a, instance_attributes=[attribute_super], ) @@ -53,7 +58,7 @@ class SuperTest: superclasses=["SuperTest"], is_public=True, reexported_by=[], - documentation=ClassDocumentation("This is a test"), + docstring=ClassDocstring("This is a test"), code="", instance_attributes=[], ) @@ -65,9 +70,9 @@ class SuperTest: default_value="'test_str_a'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("'test_str_a'", "", ""), + 
docstring=ParameterDocstring("'test_str_a'", "", ""), ) - result_super = Result("config", ResultDocstring("dict", ""), function_id=function_id_super) + result_super = Result("config", "config", ResultDocstring("dict", ""), function_id=function_id_super) code_function_a = cleandoc( """ def test_function_super(test_parameter: str): @@ -85,7 +90,7 @@ def test_function_super(test_parameter: str): results=[result_super], is_public=True, reexported_by=[], - documentation=FunctionDocumentation( + docstring=FunctionDocstring( "This is a test function", ), code=code_function_a, @@ -112,7 +117,7 @@ class SubTest: pass""", ) class_id_sub = "test/test/SubTest" - attribute_sub = Attribute("new_test_int", NamedType("int"), class_id=class_id_sub) + attribute_sub = Attribute("test/test/SubTest/new_test_int", "new_test_int", NamedType("int"), class_id=class_id_sub) class_sub = Class( id=class_id_sub, qname="test.SubTest", @@ -120,7 +125,7 @@ class SubTest: superclasses=["SuperTest"], is_public=True, reexported_by=[], - documentation=ClassDocumentation("This is a test"), + docstring=ClassDocstring("This is a test"), code=code_a, instance_attributes=[attribute_sub], ) @@ -131,7 +136,7 @@ class SubTest: superclasses=[], is_public=True, reexported_by=[], - documentation=ClassDocumentation("This is a test"), + docstring=ClassDocstring("This is a test"), code="", instance_attributes=[], ) @@ -143,9 +148,9 @@ class SubTest: default_value="'test_str_a'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("'test_str_a'", "", ""), + docstring=ParameterDocstring("'test_str_a'", "", ""), ) - result_sub = Result("config", ResultDocstring("dict", ""), function_id=function_id_sub) + result_sub = Result("config", "config", ResultDocstring("dict", ""), function_id=function_id_sub) code_function_a = cleandoc( """ def test_function_sub(test_parameter: str): @@ -163,7 +168,7 @@ def test_function_sub(test_parameter: str): results=[result_sub], is_public=True, reexported_by=[], - documentation=FunctionDocumentation( + docstring=FunctionDocstring( "This test function is only for testing", ), code=code_function_a, diff --git a/tests/library_analyzer/processing/migration/model/test_mapping.py b/tests/library_analyzer/processing/migration/model/test_mapping.py index 0f4932a5..17805120 100644 --- a/tests/library_analyzer/processing/migration/model/test_mapping.py +++ b/tests/library_analyzer/processing/migration/model/test_mapping.py @@ -1,6 +1,6 @@ from inspect import cleandoc -from library_analyzer.processing.api.model import API, Class, ClassDocumentation +from library_analyzer.processing.api.model import API, Class, ClassDocstring from library_analyzer.processing.migration.model import ( APIMapping, ManyToManyMapping, @@ -21,7 +21,7 @@ def test_one_to_one_mapping() -> None: superclasses=[], is_public=True, reexported_by=[], - documentation=ClassDocumentation("This is a test"), + docstring=ClassDocstring("This is a test"), code="", instance_attributes=[], ) @@ -81,7 +81,7 @@ def test_many_to_many_mapping() -> None: superclasses=[], is_public=True, reexported_by=[], - documentation=ClassDocumentation("This is a test"), + docstring=ClassDocstring("This is a test"), code="", instance_attributes=[], ) @@ -110,7 +110,7 @@ def test_too_different_mapping() -> None: superclasses=[], is_public=True, reexported_by=[], - documentation=ClassDocumentation("This is a test"), + docstring=ClassDocstring("This is a test"), code="", instance_attributes=[], ) @@ -123,7 +123,7 @@ def 
test_too_different_mapping() -> None: superclasses=[], is_public=True, reexported_by=[], - documentation=ClassDocumentation("not similar to the other class"), + docstring=ClassDocstring("not similar to the other class"), code=cleandoc( """ @@ -165,7 +165,7 @@ def create_apis() -> tuple[API, API, Class, Class, Class]: superclasses=[], is_public=True, reexported_by=[], - documentation=ClassDocumentation("This is a test"), + docstring=ClassDocstring("This is a test"), code="", instance_attributes=[], ) @@ -178,7 +178,7 @@ def create_apis() -> tuple[API, API, Class, Class, Class]: superclasses=[], is_public=True, reexported_by=[], - documentation=ClassDocumentation("This is a test"), + docstring=ClassDocstring("This is a test"), code="", instance_attributes=[], ) @@ -189,7 +189,7 @@ def create_apis() -> tuple[API, API, Class, Class, Class]: superclasses=[], is_public=True, reexported_by=[], - documentation=ClassDocumentation("This is a test"), + docstring=ClassDocstring("This is a test"), code="", instance_attributes=[], ) diff --git a/tests/library_analyzer/processing/migration/model/test_strict_differ.py b/tests/library_analyzer/processing/migration/model/test_strict_differ.py index 5e24fb6f..e88eaa1b 100644 --- a/tests/library_analyzer/processing/migration/model/test_strict_differ.py +++ b/tests/library_analyzer/processing/migration/model/test_strict_differ.py @@ -5,13 +5,13 @@ API, Attribute, Class, - ClassDocumentation, + ClassDocstring, Function, - FunctionDocumentation, + FunctionDocstring, NamedType, Parameter, ParameterAssignment, - ParameterDocumentation, + ParameterDocstring, Result, ResultDocstring, ) @@ -37,7 +37,7 @@ class Test: pass""", ) class_id_a = "test/test/Test" - attribute_a = Attribute("new_test_string", NamedType("str"), class_id=class_id_a) + attribute_a = Attribute("test/test/Test/new_test_string", "new_test_string", NamedType("str"), class_id=class_id_a) class_a = Class( id=class_id_a, qname="test.Test", @@ -45,7 +45,7 @@ class Test: superclasses=[], is_public=True, reexported_by=[], - documentation=ClassDocumentation("This is a test"), + docstring=ClassDocstring("This is a test"), code=code_a, instance_attributes=[attribute_a], ) @@ -56,7 +56,7 @@ class newTest: pass""", ) class_id_b = "test/test/NewTest" - attribute_b = Attribute("test_string", NamedType("str"), class_id=class_id_b) + attribute_b = Attribute("test/test/NewTest/test_string", "test_string", NamedType("str"), class_id=class_id_b) class_b = Class( id=class_id_b, qname="test.newTest", @@ -64,7 +64,7 @@ class newTest: superclasses=[], is_public=True, reexported_by=[], - documentation=ClassDocumentation("This is a new test"), + docstring=ClassDocstring("This is a new test"), code=code_b, instance_attributes=[attribute_b], ) @@ -79,9 +79,9 @@ class newTest: default_value="'test_str_a'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("'test_str_a'", "", ""), + docstring=ParameterDocstring("'test_str_a'", "", ""), ) - result_a = Result("config", ResultDocstring("dict", ""), function_id=function_id_a) + result_a = Result("config", "config", ResultDocstring("dict", ""), function_id=function_id_a) code_function_a = cleandoc( """ def test(test_parameter: str): @@ -99,7 +99,7 @@ def test(test_parameter: str): results=[result_a], is_public=True, reexported_by=[], - documentation=FunctionDocumentation( + docstring=FunctionDocstring( "This test function is a for testing", ), code=code_function_a, @@ -121,9 +121,10 @@ def test_method(test_parameter: str): 
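The hunks above also show a widened signature for `Attribute` and `Result`: both now take an explicit id as their first positional argument, ahead of the name that used to come first. A minimal sketch of the new call shape, using values taken from the fixtures above (the `function_id` value is illustrative, since the variable it comes from is elided in the hunks):

```python
from library_analyzer.processing.api.model import (
    Attribute,
    NamedType,
    Result,
    ResultDocstring,
)

# id is the new first argument; the name follows as the second argument.
attribute = Attribute(
    "test/test/Test/new_test_string",  # id
    "new_test_string",                 # name
    NamedType("str"),
    class_id="test/test/Test",
)

result = Result(
    "config",  # id
    "config",  # name
    ResultDocstring("dict", ""),
    function_id="test/test/Test/test",  # illustrative; elided in the hunks
)
```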
default_value="'test_str_b'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("'test_str_b'", "", ""), + docstring=ParameterDocstring("'test_str_b'", "", ""), ) result_b = Result( + "new_config", "new_config", ResultDocstring("dict", "A dictionary that includes the new configuration"), function_id=function_id_b, @@ -136,7 +137,7 @@ def test_method(test_parameter: str): results=[result_b], is_public=True, reexported_by=[], - documentation=FunctionDocumentation( + docstring=FunctionDocstring( "This test function is a test", ), code=code_b, diff --git a/tests/library_analyzer/processing/migration/model/test_unchanged_differ.py b/tests/library_analyzer/processing/migration/model/test_unchanged_differ.py index 8f9d527a..3fc79090 100644 --- a/tests/library_analyzer/processing/migration/model/test_unchanged_differ.py +++ b/tests/library_analyzer/processing/migration/model/test_unchanged_differ.py @@ -5,13 +5,13 @@ API, Attribute, Class, - ClassDocumentation, + ClassDocstring, Function, - FunctionDocumentation, + FunctionDocstring, NamedType, Parameter, ParameterAssignment, - ParameterDocumentation, + ParameterDocstring, Result, ResultDocstring, ) @@ -27,7 +27,7 @@ class Test: pass""", ) class_id_a = "test/test/Test" - attribute_a = Attribute("new_test_string", NamedType("str"), class_id=class_id_a) + attribute_a = Attribute("test/test/Test/new_test_string", "new_test_string", NamedType("str"), class_id=class_id_a) class_a = Class( id=class_id_a, qname="test.Test", @@ -35,7 +35,7 @@ class Test: superclasses=[], is_public=True, reexported_by=[], - documentation=ClassDocumentation("This is a test"), + docstring=ClassDocstring("This is a test"), code=code_a, instance_attributes=[attribute_a], ) @@ -46,7 +46,7 @@ class newTest: pass""", ) class_id_b = "test/test/NewTest" - attribute_b = Attribute("test_string", NamedType("str"), class_id=class_id_b) + attribute_b = Attribute("test/test/NewTest/test_string", "test_string", NamedType("str"), class_id=class_id_b) class_b = Class( id=class_id_b, qname="test.newTest", @@ -54,7 +54,7 @@ class newTest: superclasses=[], is_public=True, reexported_by=[], - documentation=ClassDocumentation("This is a new test"), + docstring=ClassDocstring("This is a new test"), code=code_b, instance_attributes=[attribute_b], ) @@ -69,9 +69,9 @@ class newTest: default_value="'test_str_a'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("'test_str_a'", "", ""), + docstring=ParameterDocstring("'test_str_a'", "", ""), ) - result_a = Result("config", ResultDocstring("dict", ""), function_id=function_id_a) + result_a = Result("config", "config", ResultDocstring("dict", ""), function_id=function_id_a) code_function_a = cleandoc( """ def test(test_parameter: str): @@ -89,7 +89,7 @@ def test(test_parameter: str): results=[result_a], is_public=True, reexported_by=[], - documentation=FunctionDocumentation( + docstring=FunctionDocstring( "This test function is a for testing", ), code=code_function_a, @@ -111,9 +111,10 @@ def test_method(test_parameter: str): default_value="'test_str_b'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("'test_str_b'", "", ""), + docstring=ParameterDocstring("'test_str_b'", "", ""), ) result_b = Result( + "new_config", "new_config", ResultDocstring("dict", "A dictionary that includes the new configuration"), function_id=function_id_b, @@ -126,7 +127,7 @@ def test_method(test_parameter: 
str): results=[result_b], is_public=True, reexported_by=[], - documentation=FunctionDocumentation( + docstring=FunctionDocstring( "This test function is a test", ), code=code_b, @@ -143,23 +144,15 @@ def test_method(test_parameter: str): apiv1.add_class(class_c) apiv2.add_class(class_d) - class_mapping = OneToOneMapping(1.0, class_a, class_a) - function_mapping = OneToOneMapping(1.0, function_a, function_a) - attribute_mapping = OneToOneMapping(1.0, attribute_a, attribute_a) - parameter_mapping = OneToOneMapping(1.0, parameter_a, parameter_a) - result_mapping = OneToOneMapping(1.0, result_a, result_a) - class_mapping_changed_code = OneToOneMapping(1.0, class_c, class_d) + OneToOneMapping(1.0, class_a, class_a) + OneToOneMapping(1.0, function_a, function_a) + OneToOneMapping(1.0, parameter_a, parameter_a) unchanged_differ = UnchangedDiffer(None, [], apiv1, apiv2) - assert unchanged_differ.get_additional_mappings() == [class_mapping_changed_code] + assert unchanged_differ.compute_class_similarity(class_c, class_d) == 1 apiv1.classes.pop(class_c.id) apiv2.classes.pop(class_d.id) unchanged_differ = UnchangedDiffer(None, [], apiv1, apiv1) - expected_mappings = [ - class_mapping, - function_mapping, - parameter_mapping, - attribute_mapping, - result_mapping, - ] - assert unchanged_differ.get_additional_mappings() == expected_mappings + assert unchanged_differ.compute_class_similarity(class_a, class_a) == 1 + assert unchanged_differ.compute_function_similarity(function_a, function_a) == 1 + assert unchanged_differ.compute_parameter_similarity(parameter_a, parameter_a) == 1 diff --git a/tests/library_analyzer/processing/usages/model/test_usages.py b/tests/library_analyzer/processing/usages/model/test_usages.py index 1b96c8d6..abf64a74 100644 --- a/tests/library_analyzer/processing/usages/model/test_usages.py +++ b/tests/library_analyzer/processing/usages/model/test_usages.py @@ -20,21 +20,21 @@ def usage_counts_json() -> dict: @pytest.fixture() def usage_counts(usage_counts_json: dict) -> UsageCountStore: - return UsageCountStore.from_json(usage_counts_json) + return UsageCountStore.from_dict(usage_counts_json) -def test_to_json_is_inverse_of_from_json(usage_counts_json: Any) -> None: - assert UsageCountStore.from_json(usage_counts_json).to_json() == usage_counts_json +def test_to_dict_is_inverse_of_from_dict(usage_counts_json: Any) -> None: + assert UsageCountStore.from_dict(usage_counts_json).to_dict() == usage_counts_json -def test_from_json_is_inverse_of_to_json(usage_counts: UsageCountStore) -> None: - assert UsageCountStore.from_json(usage_counts.to_json()) == usage_counts +def test_from_dict_is_inverse_of_to_dict(usage_counts: UsageCountStore) -> None: + assert UsageCountStore.from_dict(usage_counts.to_dict()) == usage_counts def test_add_class_usage_for_new_class(usage_counts: UsageCountStore) -> None: usage_counts.add_class_usages("TestClass2") - assert usage_counts.to_json() == { + assert usage_counts.to_dict() == { "schemaVersion": USAGES_SCHEMA_VERSION, "class_counts": { "TestClass": 2, @@ -49,7 +49,7 @@ def test_add_class_usage_for_new_class(usage_counts: UsageCountStore) -> None: def test_add_class_usage_for_existing_class(usage_counts: UsageCountStore) -> None: usage_counts.add_class_usages("TestClass", 2) - assert usage_counts.to_json() == { + assert usage_counts.to_dict() == { "schemaVersion": USAGES_SCHEMA_VERSION, "class_counts": {"TestClass": 4}, "function_counts": {"TestClass.test_function": 2}, @@ -62,13 +62,13 @@ def test_remove_class_for_missing_class(usage_counts: 
UsageCountStore, usage_cou usage_counts.remove_class("TestClass2") # Should be unchanged - assert usage_counts.to_json() == usage_counts_json + assert usage_counts.to_dict() == usage_counts_json def test_remove_class_for_existing_class(usage_counts: UsageCountStore) -> None: usage_counts.remove_class("TestClass") - assert usage_counts.to_json() == { + assert usage_counts.to_dict() == { "schemaVersion": USAGES_SCHEMA_VERSION, "class_counts": {}, "function_counts": {}, @@ -80,7 +80,7 @@ def test_remove_class_for_existing_class(usage_counts: UsageCountStore) -> None: def test_add_function_usages_for_new_function(usage_counts: UsageCountStore) -> None: usage_counts.add_function_usages("TestClass.test_function_2") - assert usage_counts.to_json() == { + assert usage_counts.to_dict() == { "schemaVersion": USAGES_SCHEMA_VERSION, "class_counts": {"TestClass": 2}, "function_counts": { @@ -97,7 +97,7 @@ def test_add_function_usages_for_existing_function( ) -> None: usage_counts.add_function_usages("TestClass.test_function", 2) - assert usage_counts.to_json() == { + assert usage_counts.to_dict() == { "schemaVersion": USAGES_SCHEMA_VERSION, "class_counts": {"TestClass": 2}, "function_counts": {"TestClass.test_function": 4}, @@ -110,13 +110,13 @@ def test_remove_function_for_missing_function(usage_counts: UsageCountStore, usa usage_counts.remove_function("TestClass.test_function_2") # Should be unchanged - assert usage_counts.to_json() == usage_counts_json + assert usage_counts.to_dict() == usage_counts_json def test_remove_function_for_existing_function(usage_counts: UsageCountStore) -> None: usage_counts.remove_function("TestClass.test_function") - assert usage_counts.to_json() == { + assert usage_counts.to_dict() == { "schemaVersion": USAGES_SCHEMA_VERSION, "class_counts": {"TestClass": 2}, "function_counts": {}, @@ -128,7 +128,7 @@ def test_remove_function_for_existing_function(usage_counts: UsageCountStore) -> def test_add_parameter_usages_for_new_parameter(usage_counts: UsageCountStore) -> None: usage_counts.add_parameter_usages("TestClass.test_function.test_parameter_2") - assert usage_counts.to_json() == { + assert usage_counts.to_dict() == { "schemaVersion": USAGES_SCHEMA_VERSION, "class_counts": {"TestClass": 2}, "function_counts": {"TestClass.test_function": 2}, @@ -145,7 +145,7 @@ def test_add_parameter_usages_for_existing_parameter( ) -> None: usage_counts.add_parameter_usages("TestClass.test_function.test_parameter", 2) - assert usage_counts.to_json() == { + assert usage_counts.to_dict() == { "schemaVersion": USAGES_SCHEMA_VERSION, "class_counts": {"TestClass": 2}, "function_counts": {"TestClass.test_function": 2}, @@ -158,13 +158,13 @@ def test_remove_parameter_for_missing_parameter(usage_counts: UsageCountStore, u usage_counts.remove_parameter("TestClass.test_function.test_parameter_2") # Should be unchanged - assert usage_counts.to_json() == usage_counts_json + assert usage_counts.to_dict() == usage_counts_json def test_remove_parameter_for_existing_parameter(usage_counts: UsageCountStore) -> None: usage_counts.remove_parameter("TestClass.test_function.test_parameter") - assert usage_counts.to_json() == { + assert usage_counts.to_dict() == { "schemaVersion": USAGES_SCHEMA_VERSION, "class_counts": {"TestClass": 2}, "function_counts": {"TestClass.test_function": 2}, @@ -176,7 +176,7 @@ def test_remove_parameter_for_existing_parameter(usage_counts: UsageCountStore) def test_add_value_usages_for_new_parameter(usage_counts: UsageCountStore) -> None: 
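Alongside the docstring renames, this file tracks the serialization rename on `UsageCountStore`: `to_json`/`from_json` become `to_dict`/`from_dict`, and the tests assert that the two remain mutual inverses. A short sketch of the renamed API, using only keys and calls that appear in these tests (the import path is assumed from the test layout):

```python
from library_analyzer.processing.usages.model import UsageCountStore

# Deserialize with the renamed from_dict; the real fixture also contains
# parameter and value counts, which are elided in the hunks above.
store = UsageCountStore.from_dict(
    {
        "class_counts": {"TestClass": 2},
        "function_counts": {"TestClass.test_function": 2},
    },
)

# Adding two more usages of an existing class bumps its count to 4,
# which the renamed to_dict then reports.
store.add_class_usages("TestClass", 2)
assert store.to_dict()["class_counts"] == {"TestClass": 4}
```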
usage_counts.add_value_usages("TestClass.test_function.test_parameter_2", "'test'") - assert usage_counts.to_json() == { + assert usage_counts.to_dict() == { "schemaVersion": USAGES_SCHEMA_VERSION, "class_counts": {"TestClass": 2}, "function_counts": {"TestClass.test_function": 2}, @@ -191,7 +191,7 @@ def test_add_value_usages_for_new_parameter(usage_counts: UsageCountStore) -> No def test_add_value_usages_for_new_value(usage_counts: UsageCountStore) -> None: usage_counts.add_value_usages("TestClass.test_function.test_parameter", "'test2'") - assert usage_counts.to_json() == { + assert usage_counts.to_dict() == { "schemaVersion": USAGES_SCHEMA_VERSION, "class_counts": {"TestClass": 2}, "function_counts": {"TestClass.test_function": 2}, @@ -205,7 +205,7 @@ def test_add_value_usages_for_existing_parameter_and_value( ) -> None: usage_counts.add_value_usages("TestClass.test_function.test_parameter", "'test'", 2) - assert usage_counts.to_json() == { + assert usage_counts.to_dict() == { "schemaVersion": USAGES_SCHEMA_VERSION, "class_counts": {"TestClass": 2}, "function_counts": {"TestClass.test_function": 2}, @@ -217,7 +217,7 @@ def test_add_value_usages_for_existing_parameter_and_value( def test_init_value_for_new_parameter(usage_counts: UsageCountStore) -> None: usage_counts.init_value("TestClass.test_function.test_parameter_2") - assert usage_counts.to_json() == { + assert usage_counts.to_dict() == { "schemaVersion": USAGES_SCHEMA_VERSION, "class_counts": {"TestClass": 2}, "function_counts": {"TestClass.test_function": 2}, @@ -232,7 +232,7 @@ def test_init_value_for_new_parameter(usage_counts: UsageCountStore) -> None: def test_init_value_for_existing_parameter(usage_counts: UsageCountStore) -> None: usage_counts.init_value("TestClass.test_function.test_parameter") - assert usage_counts.to_json() == { + assert usage_counts.to_dict() == { "schemaVersion": USAGES_SCHEMA_VERSION, "class_counts": {"TestClass": 2}, "function_counts": {"TestClass.test_function": 2}, @@ -299,7 +299,7 @@ def test_most_common_parameter_values_for_existing_parameter( def test_merge_other_into_self(usage_counts: UsageCountStore) -> None: - other = UsageCountStore.from_json( + other = UsageCountStore.from_dict( { "class_counts": { "TestClass": 2, @@ -322,7 +322,7 @@ def test_merge_other_into_self(usage_counts: UsageCountStore) -> None: usage_counts.merge_other_into_self(other) - assert usage_counts.to_json() == { + assert usage_counts.to_dict() == { "schemaVersion": USAGES_SCHEMA_VERSION, "class_counts": { "TestClass": 4, diff --git a/tests/migration/annotations/boundary_migration.py b/tests/migration/annotations/boundary_migration.py index 186c3554..aa962817 100644 --- a/tests/migration/annotations/boundary_migration.py +++ b/tests/migration/annotations/boundary_migration.py @@ -7,7 +7,7 @@ from library_analyzer.processing.api.model import ( Parameter, ParameterAssignment, - ParameterDocumentation, + ParameterDocstring, ) from library_analyzer.processing.migration.annotations import ( get_migration_text, @@ -35,7 +35,7 @@ def migrate_boundary_annotation_data_one_to_one_mapping() -> ( default_value="1", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("int", "1", ""), + docstring=ParameterDocstring("int", "1", ""), ) parameterv2 = Parameter( id_="test/test.boundary.test1.testB", @@ -44,7 +44,7 @@ def migrate_boundary_annotation_data_one_to_one_mapping() -> ( default_value="1", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - 
documentation=ParameterDocumentation("int", "1", ""), + docstring=ParameterDocstring("int", "1", ""), ) boundary_annotation = BoundaryAnnotation( target="test/test.boundary.test1.testA", @@ -95,7 +95,7 @@ def migrate_boundary_annotation_data_one_to_one_mapping_int_to_float() -> ( default_value="1", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("int", "1", "int in the range of (0, 10)"), + docstring=ParameterDocstring("int", "1", "int in the range of (0, 10)"), ) parameterv2 = Parameter( id_="test/test.boundary.test2.testB", @@ -104,7 +104,7 @@ def migrate_boundary_annotation_data_one_to_one_mapping_int_to_float() -> ( default_value="1.0", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("float", "1.0", "float in the range of [1.0, 9.0]"), + docstring=ParameterDocstring("float", "1.0", "float in the range of [1.0, 9.0]"), ) mapping = OneToOneMapping(1.0, parameterv1, parameterv2) @@ -159,7 +159,7 @@ def migrate_boundary_annotation_data_one_to_one_mapping_float_to_int() -> ( default_value="1.0", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("float", "1.0", "float in the range of [0.5, 9.5]"), + docstring=ParameterDocstring("float", "1.0", "float in the range of [0.5, 9.5]"), ) parameterv2 = Parameter( id_="test/test.boundary.test3.testB", @@ -168,7 +168,7 @@ def migrate_boundary_annotation_data_one_to_one_mapping_float_to_int() -> ( default_value="1", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("int", "1", "int in the range of (0, 10)"), + docstring=ParameterDocstring("int", "1", "int in the range of (0, 10)"), ) mapping = OneToOneMapping(1.0, parameterv1, parameterv2) @@ -222,7 +222,7 @@ def migrate_boundary_annotation_data_one_to_many_mapping() -> ( default_value="1", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("int", "1", "int in the range of (0, 10)"), + docstring=ParameterDocstring("int", "1", "int in the range of (0, 10)"), ) parameterv2_a = Parameter( id_="test/test.boundary.test4.testA", @@ -231,7 +231,7 @@ def migrate_boundary_annotation_data_one_to_many_mapping() -> ( default_value="1", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("int", "1", "int in the range of (0, 10)"), + docstring=ParameterDocstring("int", "1", "int in the range of (0, 10)"), ) parameterv2_b = Parameter( id_="test/test.boundary.test4.testB", @@ -240,7 +240,7 @@ def migrate_boundary_annotation_data_one_to_many_mapping() -> ( default_value="1.0", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("float", "1.0", "float in the range of (0.0, 10.0)"), + docstring=ParameterDocstring("float", "1.0", "float in the range of (0.0, 10.0)"), ) parameterv2_c = Parameter( id_="test/test.boundary.test4.testC", @@ -249,7 +249,7 @@ def migrate_boundary_annotation_data_one_to_many_mapping() -> ( default_value=None, assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("", "", ""), + docstring=ParameterDocstring("", "", ""), ) mapping = OneToManyMapping(1.0, parameterv1, [parameterv2_a, parameterv2_b, parameterv2_c]) @@ -335,7 +335,7 @@ def migrate_boundary_annotation_data_duplicated() -> ( default_value="1", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - 
documentation=ParameterDocumentation("int", "1", ""), + docstring=ParameterDocstring("int", "1", ""), ) parameterv1_2 = Parameter( id_="test/test.boundary.duplicate.testA_2", @@ -344,7 +344,7 @@ def migrate_boundary_annotation_data_duplicated() -> ( default_value="1", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("int", "1", ""), + docstring=ParameterDocstring("int", "1", ""), ) parameterv2 = Parameter( id_="test/test.boundary.duplicate.testB", @@ -353,7 +353,7 @@ def migrate_boundary_annotation_data_duplicated() -> ( default_value="1", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("int", "1", ""), + docstring=ParameterDocstring("int", "1", ""), ) boundary_annotation = BoundaryAnnotation( target="test/test.boundary.duplicate.testA", diff --git a/tests/migration/annotations/called_after_migration.py b/tests/migration/annotations/called_after_migration.py index 306afa5c..65507c57 100644 --- a/tests/migration/annotations/called_after_migration.py +++ b/tests/migration/annotations/called_after_migration.py @@ -4,7 +4,7 @@ EnumReviewResult, TodoAnnotation, ) -from library_analyzer.processing.api.model import Function, FunctionDocumentation +from library_analyzer.processing.api.model import Function, FunctionDocstring from library_analyzer.processing.migration.annotations import ( get_migration_text, migration_author, @@ -32,7 +32,7 @@ def migrate_called_after_annotation_data_one_to_one_mapping() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) functionv1_before = Function( @@ -43,7 +43,7 @@ def migrate_called_after_annotation_data_one_to_one_mapping() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) functionv2_after = Function( @@ -54,7 +54,7 @@ def migrate_called_after_annotation_data_one_to_one_mapping() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) functionv2_before = Function( @@ -65,7 +65,7 @@ def migrate_called_after_annotation_data_one_to_one_mapping() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) mapping_after = OneToOneMapping(1.0, functionv1_after, functionv2_after) @@ -104,7 +104,7 @@ def migrate_called_after_annotation_data_one_to_many_mapping() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) functionv1_before = Function( @@ -115,7 +115,7 @@ def migrate_called_after_annotation_data_one_to_many_mapping() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) functionv2_after_a = Function( @@ -126,7 +126,7 @@ def migrate_called_after_annotation_data_one_to_many_mapping() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) functionv2_after_b = Function( @@ -137,7 +137,7 @@ def migrate_called_after_annotation_data_one_to_many_mapping() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) functionv2_before = Function( @@ -148,7 +148,7 @@ def 
migrate_called_after_annotation_data_one_to_many_mapping() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) mapping_after = OneToManyMapping(1.0, functionv1_after, [functionv2_after_a, functionv2_after_b]) @@ -199,7 +199,7 @@ def migrate_called_after_annotation_data_one_to_one_mapping__no_mapping_found() results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) functionv2_after = Function( @@ -210,7 +210,7 @@ def migrate_called_after_annotation_data_one_to_one_mapping__no_mapping_found() results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) mapping_after = OneToOneMapping(1.0, functionv1_after, functionv2_after) @@ -222,13 +222,13 @@ def migrate_called_after_annotation_data_one_to_one_mapping__no_mapping_found() reviewResult=EnumReviewResult.NONE, calledAfterName="test_before", ) - annotationv2 = CalledAfterAnnotation( + annotationv2 = TodoAnnotation( target="test/test.called_after.test3.test/NewClass/new_test_after", authors=["testauthor", migration_author], reviewers=[], - comment=get_migration_text(annotationv1, mapping_after), - reviewResult=EnumReviewResult.UNSURE, - calledAfterName="test_before", + comment="", + reviewResult=EnumReviewResult.NONE, + newTodo=get_migration_text(annotationv1, mapping_after, for_todo_annotation=True), ) return mapping_after, annotationv1, [annotationv2] @@ -248,7 +248,7 @@ def migrate_called_after_annotation_data_one_to_one_mapping__before_splits() -> results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) functionv1_before = Function( @@ -259,7 +259,7 @@ def migrate_called_after_annotation_data_one_to_one_mapping__before_splits() -> results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) functionv2_after = Function( @@ -270,7 +270,7 @@ def migrate_called_after_annotation_data_one_to_one_mapping__before_splits() -> results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) functionv2_before_a = Function( @@ -281,7 +281,7 @@ def migrate_called_after_annotation_data_one_to_one_mapping__before_splits() -> results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) functionv2_before_b = Function( @@ -292,7 +292,7 @@ def migrate_called_after_annotation_data_one_to_one_mapping__before_splits() -> results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) mapping_after = OneToOneMapping(1.0, functionv1_after, functionv2_after) @@ -335,7 +335,7 @@ def migrate_called_after_annotation_data_one_to_many_mapping__two_classes() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) functionv1_before = Function( @@ -346,7 +346,7 @@ def migrate_called_after_annotation_data_one_to_many_mapping__two_classes() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) functionv2_after_a = Function( @@ -357,7 +357,7 @@ def migrate_called_after_annotation_data_one_to_many_mapping__two_classes() -> ( results=[], is_public=True, 
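Every fixture in these migration data files rebuilds `Function` objects with the renamed `docstring` field. A minimal sketch of the constructor as these hunks use it; arguments the hunks elide (for example decorators) are omitted here and may be required in practice:

```python
from library_analyzer.processing.api.model import Function, FunctionDocstring

# Only keyword arguments visible in the hunks above are shown.
function = Function(
    id="test/test.called_after.test1.test/Class/test_after",
    qname="test.called_after.test1.test.Class.test_after",  # assumed pairing with the id
    parameters=[],
    results=[],
    is_public=True,
    reexported_by=[],
    docstring=FunctionDocstring(),  # was: documentation=FunctionDocumentation()
    code="",
)
```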
reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) functionv2_after_b = Function( @@ -368,7 +368,7 @@ def migrate_called_after_annotation_data_one_to_many_mapping__two_classes() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) functionv2_before_a = Function( @@ -379,7 +379,7 @@ def migrate_called_after_annotation_data_one_to_many_mapping__two_classes() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) functionv2_before_b = Function( @@ -390,7 +390,7 @@ def migrate_called_after_annotation_data_one_to_many_mapping__two_classes() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) mapping_after = OneToManyMapping(1.0, functionv1_after, [functionv2_after_a, functionv2_after_b]) @@ -441,7 +441,7 @@ def migrate_called_after_annotation_data_duplicated() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) functionv1_after_2 = Function( @@ -452,7 +452,7 @@ def migrate_called_after_annotation_data_duplicated() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) functionv1_before = Function( @@ -463,7 +463,7 @@ def migrate_called_after_annotation_data_duplicated() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) functionv2_after = Function( @@ -474,7 +474,7 @@ def migrate_called_after_annotation_data_duplicated() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) functionv2_before = Function( @@ -485,7 +485,7 @@ def migrate_called_after_annotation_data_duplicated() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) mapping_after = ManyToOneMapping(1.0, [functionv1_after, functionv1_after_2], functionv2_after) diff --git a/tests/migration/annotations/description_migration.py b/tests/migration/annotations/description_migration.py index 2a0f9605..f999b029 100644 --- a/tests/migration/annotations/description_migration.py +++ b/tests/migration/annotations/description_migration.py @@ -6,12 +6,12 @@ ) from library_analyzer.processing.api.model import ( Class, - ClassDocumentation, + ClassDocstring, Function, - FunctionDocumentation, + FunctionDocstring, Parameter, ParameterAssignment, - ParameterDocumentation, + ParameterDocstring, ) from library_analyzer.processing.migration.annotations import ( get_migration_text, @@ -40,7 +40,7 @@ def migrate_description_annotation_data_one_to_one_mapping__function() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) @@ -52,7 +52,7 @@ def migrate_description_annotation_data_one_to_one_mapping__function() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) @@ -91,7 +91,7 @@ def migrate_description_annotation_data_one_to_many_mapping__class() -> ( superclasses=[], is_public=True, reexported_by=[], - documentation=ClassDocumentation(), + docstring=ClassDocstring(), code="class 
DescriptionTestClass:\n pass", instance_attributes=[], ) @@ -102,7 +102,7 @@ def migrate_description_annotation_data_one_to_many_mapping__class() -> ( superclasses=[], is_public=True, reexported_by=[], - documentation=ClassDocumentation(), + docstring=ClassDocstring(), code="class NewDescriptionTestClass:\n pass", instance_attributes=[], ) @@ -113,7 +113,7 @@ def migrate_description_annotation_data_one_to_many_mapping__class() -> ( superclasses=[], is_public=True, reexported_by=[], - documentation=ClassDocumentation(), + docstring=ClassDocstring(), code="class NewDescriptionTestClass2:\n pass", instance_attributes=[], ) @@ -125,7 +125,7 @@ def migrate_description_annotation_data_one_to_many_mapping__class() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) @@ -180,7 +180,7 @@ def migrate_description_annotation_data_one_to_one_mapping__parameter() -> ( default_value="value", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "value", "docstring"), + docstring=ParameterDocstring("str", "value", "docstring"), ) parameterv2 = Parameter( @@ -190,7 +190,7 @@ def migrate_description_annotation_data_one_to_one_mapping__parameter() -> ( default_value="value", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "value", "docstring"), + docstring=ParameterDocstring("str", "changed_value", "doc"), ) mapping = OneToOneMapping(1.0, parameterv1, parameterv2) @@ -208,7 +208,7 @@ def migrate_description_annotation_data_one_to_one_mapping__parameter() -> ( authors=["testauthor", migration_author], reviewers=[], comment="", - reviewResult=EnumReviewResult.NONE, + reviewResult=EnumReviewResult.UNSURE, newDescription="test description", ) return mapping, annotationv1, [annotationv2] @@ -229,7 +229,7 @@ def migrate_description_annotation_data_duplicated() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) functionv1_2 = Function( @@ -240,7 +240,7 @@ def migrate_description_annotation_data_duplicated() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) @@ -252,7 +252,7 @@ def migrate_description_annotation_data_duplicated() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) diff --git a/tests/migration/annotations/enum_migration.py b/tests/migration/annotations/enum_migration.py index d4dfad22..7b38cf6e 100644 --- a/tests/migration/annotations/enum_migration.py +++ b/tests/migration/annotations/enum_migration.py @@ -7,7 +7,7 @@ from library_analyzer.processing.api.model import ( Parameter, ParameterAssignment, - ParameterDocumentation, + ParameterDocstring, ) from library_analyzer.processing.migration.annotations import ( get_migration_text, @@ -35,7 +35,7 @@ def migrate_enum_annotation_data_one_to_one_mapping() -> ( default_value="value", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "value", "docstring"), + docstring=ParameterDocstring("str", "value", "docstring"), ) parameterv2 = Parameter( id_="test/test.enum.test1.TestB", @@ -44,7 +44,7 @@ def migrate_enum_annotation_data_one_to_one_mapping() -> ( default_value="value", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - 
documentation=ParameterDocumentation("str", "value", "docstring"), + docstring=ParameterDocstring("str", "value", "docstring"), ) mapping = OneToOneMapping(1.0, parameterv1, parameterv2) enum_annotation = EnumAnnotation( @@ -82,7 +82,7 @@ def migrate_enum_annotation_data_one_to_many_mapping() -> ( default_value="value", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "value", "docstring"), + docstring=ParameterDocstring("str", "value", "docstring"), ) parameterv2_a = Parameter( id_="test/test.enum.test2.TestA", @@ -91,7 +91,7 @@ def migrate_enum_annotation_data_one_to_many_mapping() -> ( default_value="value", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "value", "docstring"), + docstring=ParameterDocstring("str", "value", "docstring"), ) parameterv2_b = Parameter( id_="test/test.enum.test2.TestB", @@ -100,7 +100,7 @@ def migrate_enum_annotation_data_one_to_many_mapping() -> ( default_value="value", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "value", "docstring"), + docstring=ParameterDocstring("str", "value", "docstring"), ) mapping = OneToManyMapping(1.0, parameterv1, [parameterv2_a, parameterv2_b]) enum_annotation = EnumAnnotation( @@ -151,7 +151,7 @@ def migrate_enum_annotation_data_one_to_many_mapping__only_one_relevant_mapping( default_value="value", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "value", "docstring"), + docstring=ParameterDocstring("str", "value", "docstring"), ) parameterv2_a = Parameter( id_="test/test.enum.test3.TestA", @@ -160,7 +160,7 @@ def migrate_enum_annotation_data_one_to_many_mapping__only_one_relevant_mapping( default_value=None, assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("", "", ""), + docstring=ParameterDocstring("", "", ""), ) parameterv2_b = Parameter( id_="test/test.enum.test3.TestB", @@ -169,7 +169,7 @@ def migrate_enum_annotation_data_one_to_many_mapping__only_one_relevant_mapping( default_value="value", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "value", "docstring"), + docstring=ParameterDocstring("str", "value", "docstring"), ) parameterv2_c = Parameter( id_="test/test.enum.test3.TestC", @@ -178,7 +178,7 @@ def migrate_enum_annotation_data_one_to_many_mapping__only_one_relevant_mapping( default_value="0", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("int", "0", "docstring"), + docstring=ParameterDocstring("int", "0", "docstring"), ) mapping = OneToManyMapping(1.0, parameterv1, [parameterv2_a, parameterv2_b, parameterv2_c]) enum_annotation = EnumAnnotation( @@ -229,7 +229,7 @@ def migrate_enum_annotation_data_duplicated() -> ( default_value="value", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "value", "docstring"), + docstring=ParameterDocstring("str", "value", "docstring"), ) parameterv1_2 = Parameter( id_="test/test.enum.duplicate.TestA_2", @@ -238,7 +238,7 @@ def migrate_enum_annotation_data_duplicated() -> ( default_value="value", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "value", "docstring"), + docstring=ParameterDocstring("str", "value", "docstring"), ) 
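The parameter fixtures follow the same pattern: the `documentation` keyword becomes `docstring`, and `ParameterDocumentation` becomes `ParameterDocstring`, keeping the same three positional arguments (type, default value, description). A minimal sketch with the fields these fixtures use; `name` and `qname` are elided in the hunks and assumed here:

```python
from library_analyzer.processing.api.model import (
    Parameter,
    ParameterAssignment,
    ParameterDocstring,
)

parameter = Parameter(
    id_="test/test.enum.test1.TestA",
    name="TestA",                    # assumed; elided in the hunks
    qname="test.enum.test1.TestA",   # assumed; elided in the hunks
    default_value="value",
    assigned_by=ParameterAssignment.POSITION_OR_NAME,
    is_public=True,
    # was: documentation=ParameterDocumentation("str", "value", "docstring")
    docstring=ParameterDocstring("str", "value", "docstring"),
)
```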
parameterv2 = Parameter( id_="test/test.enum.duplicate.TestB", @@ -247,7 +247,7 @@ def migrate_enum_annotation_data_duplicated() -> ( default_value="value", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "value", "docstring"), + docstring=ParameterDocstring("str", "value", "docstring"), ) mapping = ManyToOneMapping(1.0, [parameterv1, parameterv1_2], parameterv2) enum_annotation = EnumAnnotation( diff --git a/tests/migration/annotations/expert_migration.py b/tests/migration/annotations/expert_migration.py index 8d831013..6205802b 100644 --- a/tests/migration/annotations/expert_migration.py +++ b/tests/migration/annotations/expert_migration.py @@ -6,12 +6,12 @@ ) from library_analyzer.processing.api.model import ( Class, - ClassDocumentation, + ClassDocstring, Function, - FunctionDocumentation, + FunctionDocstring, Parameter, ParameterAssignment, - ParameterDocumentation, + ParameterDocstring, ) from library_analyzer.processing.migration.annotations import ( get_migration_text, @@ -40,7 +40,7 @@ def migrate_expert_annotation_data__function() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) @@ -52,7 +52,7 @@ def migrate_expert_annotation_data__function() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) @@ -89,7 +89,7 @@ def migrate_expert_annotation_data__class() -> ( superclasses=[], is_public=True, reexported_by=[], - documentation=ClassDocumentation(), + docstring=ClassDocstring(), code="class ExpertTestClass:\n pass", instance_attributes=[], ) @@ -100,7 +100,7 @@ def migrate_expert_annotation_data__class() -> ( superclasses=[], is_public=True, reexported_by=[], - documentation=ClassDocumentation(), + docstring=ClassDocstring(), code="class NewExpertTestClass:\n pass", instance_attributes=[], ) @@ -112,7 +112,7 @@ def migrate_expert_annotation_data__class() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) @@ -157,7 +157,7 @@ def migrate_expert_annotation_data__parameter() -> ( default_value="'this is a string'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "this is a string", ""), + docstring=ParameterDocstring("str", "this is a string", ""), ) parameterv2 = Parameter( id_="test/test.expert/test3/testB", @@ -166,7 +166,7 @@ def migrate_expert_annotation_data__parameter() -> ( default_value="'test string'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "'test string'", ""), + docstring=ParameterDocstring("str", "'test string'", ""), ) mapping = OneToOneMapping(1.0, parameterv1, parameterv2) annotationv1 = ExpertAnnotation( @@ -201,7 +201,7 @@ def migrate_expert_annotation_data_duplicated() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) functionv1_2 = Function( @@ -212,7 +212,7 @@ def migrate_expert_annotation_data_duplicated() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) @@ -224,7 +224,7 @@ def migrate_expert_annotation_data_duplicated() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), 
code="", ) diff --git a/tests/migration/annotations/group_migration.py b/tests/migration/annotations/group_migration.py index ab274638..aeac1dcd 100644 --- a/tests/migration/annotations/group_migration.py +++ b/tests/migration/annotations/group_migration.py @@ -6,12 +6,12 @@ ) from library_analyzer.processing.api.model import ( Class, - ClassDocumentation, + ClassDocstring, Function, - FunctionDocumentation, + FunctionDocstring, Parameter, ParameterAssignment, - ParameterDocumentation, + ParameterDocstring, ) from library_analyzer.processing.migration.annotations import ( get_migration_text, @@ -40,7 +40,7 @@ def migrate_group_annotation_data_one_to_many_mapping() -> ( default_value="1", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("int", "1", "int in the range of (0, 10)"), + docstring=ParameterDocstring("int", "1", "int in the range of (0, 10)"), ) parameterv1_b = Parameter( id_="test/test.group.test1.test/TestClass/test/parameter_b", @@ -49,7 +49,7 @@ def migrate_group_annotation_data_one_to_many_mapping() -> ( default_value="'test'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "'test'", "str"), + docstring=ParameterDocstring("str", "'test'", "str"), ) parameterv1_c = Parameter( id_="test/test.group.test1.test/TestClass/test/parameter_c", @@ -58,7 +58,7 @@ def migrate_group_annotation_data_one_to_many_mapping() -> ( default_value="'test_c'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "'test_c'", "str"), + docstring=ParameterDocstring("str", "'test_c'", "str"), ) functionv1 = Function( id="test/test.group.test1.test/TestClass/test", @@ -68,7 +68,7 @@ def migrate_group_annotation_data_one_to_many_mapping() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) @@ -79,7 +79,7 @@ def migrate_group_annotation_data_one_to_many_mapping() -> ( default_value="1", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("int", "1", "int in the range of (0, 10)"), + docstring=ParameterDocstring("int", "1", "int in the range of (0, 10)"), ) parameterv2_b = Parameter( id_="test/test.group.test1.test/NewTestClass/test/new_parameter_b", @@ -88,7 +88,7 @@ def migrate_group_annotation_data_one_to_many_mapping() -> ( default_value="'test'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "'test'", "str"), + docstring=ParameterDocstring("str", "'test'", "str"), ) parameterv2_c = Parameter( id_="test/test.group.test1.test/NewTestClass/test/new_parameter_c", @@ -97,7 +97,7 @@ def migrate_group_annotation_data_one_to_many_mapping() -> ( default_value="'test_c'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "'test_c'", "str"), + docstring=ParameterDocstring("str", "'test_c'", "str"), ) functionv2 = Function( id="test/test.group.test1.test/NewTestClass/test", @@ -107,7 +107,7 @@ def migrate_group_annotation_data_one_to_many_mapping() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) @@ -118,7 +118,7 @@ def migrate_group_annotation_data_one_to_many_mapping() -> ( default_value="1", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - 
documentation=ParameterDocumentation("int", "1", "int in the range of (0, 10)"), + docstring=ParameterDocstring("int", "1", "int in the range of (0, 10)"), ) parameterv2_2_c = Parameter( id_="test/test.group.test2.test/NewTestClass/test/new_parameter_c", @@ -127,7 +127,7 @@ def migrate_group_annotation_data_one_to_many_mapping() -> ( default_value="test", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "test_c", "str"), + docstring=ParameterDocstring("str", "test_c", "str"), ) functionv2_2 = Function( id="test/test.group.test2.test/NewTestClass/test", @@ -137,7 +137,7 @@ def migrate_group_annotation_data_one_to_many_mapping() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) @@ -148,7 +148,7 @@ def migrate_group_annotation_data_one_to_many_mapping() -> ( default_value="'test'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "'test'", "str"), + docstring=ParameterDocstring("str", "'test'", "str"), ) parameterv2_3_c = Parameter( id_="test/test.group.test3.test/NewTestClass/test/new_parameter_c", @@ -157,7 +157,7 @@ def migrate_group_annotation_data_one_to_many_mapping() -> ( default_value="test", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "test_c", "str"), + docstring=ParameterDocstring("str", "test_c", "str"), ) functionv2_3 = Function( id="test/test.group.test3.test/NewTestClass/test", @@ -167,7 +167,7 @@ def migrate_group_annotation_data_one_to_many_mapping() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) parameterv2_4_b = Parameter( @@ -177,7 +177,7 @@ def migrate_group_annotation_data_one_to_many_mapping() -> ( default_value="'test'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "'test'", "str"), + docstring=ParameterDocstring("str", "'test'", "str"), ) functionv2_4 = Function( id="test/test.group.test4.test/NewTestClass/test", @@ -187,7 +187,7 @@ def migrate_group_annotation_data_one_to_many_mapping() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) functionv2_5 = Function( @@ -198,7 +198,7 @@ def migrate_group_annotation_data_one_to_many_mapping() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) classv2_6 = Class( @@ -208,7 +208,7 @@ def migrate_group_annotation_data_one_to_many_mapping() -> ( superclasses=[], is_public=True, reexported_by=[], - documentation=ClassDocumentation(), + docstring=ClassDocstring(), code="class NewClass:\n pass", instance_attributes=[], ) @@ -328,7 +328,7 @@ def migrate_group_annotation_data_one_to_one_mapping() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) parameterv1_a = Parameter( @@ -338,7 +338,7 @@ def migrate_group_annotation_data_one_to_one_mapping() -> ( default_value="1", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("int", "1", "int in the range of (0, 10)"), + docstring=ParameterDocstring("int", "1", "int in the range of (0, 10)"), ) parameterv1_b = Parameter( 
id_="test/test.group.test6.test/TestClass/test/parameter_b", @@ -347,7 +347,7 @@ def migrate_group_annotation_data_one_to_one_mapping() -> ( default_value="'test'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "'test'", "str"), + docstring=ParameterDocstring("str", "'test'", "str"), ) functionv2 = Function( @@ -358,7 +358,7 @@ def migrate_group_annotation_data_one_to_one_mapping() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) parameterv2_a = Parameter( @@ -368,7 +368,7 @@ def migrate_group_annotation_data_one_to_one_mapping() -> ( default_value="1", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("int", "1", "int in the range of (0, 10)"), + docstring=ParameterDocstring("int", "1", "int in the range of (0, 10)"), ) parameterv2_b = Parameter( id_="test/test.group.test6.test/NewTestClass/test/new_parameter_b", @@ -377,7 +377,7 @@ def migrate_group_annotation_data_one_to_one_mapping() -> ( default_value="'test'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "'test'", "str"), + docstring=ParameterDocstring("str", "'test'", "str"), ) mapping_function = OneToOneMapping(1.0, functionv1, functionv2) @@ -423,7 +423,7 @@ def migrate_group_annotation_data_one_to_one_mapping__one_mapping_for_parameters results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) parameterv1_a = Parameter( @@ -433,7 +433,7 @@ def migrate_group_annotation_data_one_to_one_mapping__one_mapping_for_parameters default_value="1", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("int", "1", "int in the range of (0, 10)"), + docstring=ParameterDocstring("int", "1", "int in the range of (0, 10)"), ) parameterv1_b = Parameter( id_="test/test.group.test7.test/TestClass/test/parameter_b", @@ -442,7 +442,7 @@ def migrate_group_annotation_data_one_to_one_mapping__one_mapping_for_parameters default_value="'test'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "'test'", "str"), + docstring=ParameterDocstring("str", "'test'", "str"), ) functionv2 = Function( @@ -453,7 +453,7 @@ def migrate_group_annotation_data_one_to_one_mapping__one_mapping_for_parameters results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) parameterv2_a = Parameter( @@ -463,7 +463,7 @@ def migrate_group_annotation_data_one_to_one_mapping__one_mapping_for_parameters default_value="1", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("int", "1", "int in the range of (0, 10)"), + docstring=ParameterDocstring("int", "1", "int in the range of (0, 10)"), ) parameterv2_b = Parameter( id_="test/test.group.test7.test/NewTestClass/test/new_parameter_b", @@ -472,7 +472,7 @@ def migrate_group_annotation_data_one_to_one_mapping__one_mapping_for_parameters default_value="'test'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "'test'", "str"), + docstring=ParameterDocstring("str", "'test'", "str"), ) mapping_function = OneToOneMapping(1.0, functionv1, functionv2) @@ -516,7 +516,7 @@ def 
migrate_group_annotation_data_duplicated() -> ( default_value="1", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("int", "1", "int in the range of (0, 10)"), + docstring=ParameterDocstring("int", "1", "int in the range of (0, 10)"), ) parameterv1_b = Parameter( id_="test/test.group.duplicate.test/TestClass/test/parameter_b", @@ -525,7 +525,7 @@ def migrate_group_annotation_data_duplicated() -> ( default_value="'test'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "'test'", "str"), + docstring=ParameterDocstring("str", "'test'", "str"), ) parameterv1_c = Parameter( id_="test/test.group.duplicate.test/TestClass/test/parameter_c", @@ -534,7 +534,7 @@ def migrate_group_annotation_data_duplicated() -> ( default_value="'test_c'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "'test_c'", "str"), + docstring=ParameterDocstring("str", "'test_c'", "str"), ) parameterv1_a_2 = Parameter( id_="test/test.group.duplicate.test/TestClass/test_2/parameter_a_2", @@ -543,7 +543,7 @@ def migrate_group_annotation_data_duplicated() -> ( default_value="1", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("int", "1", "int in the range of (0, 10)"), + docstring=ParameterDocstring("int", "1", "int in the range of (0, 10)"), ) parameterv1_b_2 = Parameter( id_="test/test.group.duplicate.test/TestClass/test_2/parameter_b_2", @@ -552,7 +552,7 @@ def migrate_group_annotation_data_duplicated() -> ( default_value="'test'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "'test'", "str"), + docstring=ParameterDocstring("str", "'test'", "str"), ) parameterv1_c_2 = Parameter( id_="test/test.group.duplicate.test/TestClass/test_2/parameter_c_2", @@ -561,7 +561,7 @@ def migrate_group_annotation_data_duplicated() -> ( default_value="'test_c'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "'test_c'", "str"), + docstring=ParameterDocstring("str", "'test_c'", "str"), ) functionv1 = Function( id="test/test.group.duplicate.test/TestClass/test", @@ -571,7 +571,7 @@ def migrate_group_annotation_data_duplicated() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) functionv1_2 = Function( @@ -582,7 +582,7 @@ def migrate_group_annotation_data_duplicated() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) @@ -593,7 +593,7 @@ def migrate_group_annotation_data_duplicated() -> ( default_value="1", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("int", "1", "int in the range of (0, 10)"), + docstring=ParameterDocstring("int", "1", "int in the range of (0, 10)"), ) parameterv2_b = Parameter( id_="test/test.group.duplicate.test/NewTestClass/test/new_parameter_b", @@ -602,7 +602,7 @@ def migrate_group_annotation_data_duplicated() -> ( default_value="'test'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "'test'", "str"), + docstring=ParameterDocstring("str", "'test'", "str"), ) parameterv2_c = Parameter( id_="test/test.group.duplicate.test/NewTestClass/test/new_parameter_c", @@ -611,7 
+611,7 @@ def migrate_group_annotation_data_duplicated() -> ( default_value="'test_c'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "'test_c'", "str"), + docstring=ParameterDocstring("str", "'test_c'", "str"), ) functionv2 = Function( id="test/test.group.duplicate.test/NewTestClass/test", @@ -621,7 +621,7 @@ def migrate_group_annotation_data_duplicated() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) diff --git a/tests/migration/annotations/move_migration.py b/tests/migration/annotations/move_migration.py index 15e620c3..3ce66c50 100644 --- a/tests/migration/annotations/move_migration.py +++ b/tests/migration/annotations/move_migration.py @@ -6,9 +6,9 @@ ) from library_analyzer.processing.api.model import ( Class, - ClassDocumentation, + ClassDocstring, Function, - FunctionDocumentation, + FunctionDocstring, ) from library_analyzer.processing.migration.annotations import ( get_migration_text, @@ -37,7 +37,7 @@ def migrate_move_annotation_data_one_to_one_mapping__global_function() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) @@ -49,7 +49,7 @@ def migrate_move_annotation_data_one_to_one_mapping__global_function() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) @@ -88,7 +88,7 @@ def migrate_move_annotation_data_one_to_one_mapping__class() -> ( superclasses=[], is_public=True, reexported_by=[], - documentation=ClassDocumentation(), + docstring=ClassDocstring(), code="class MoveTestClass:\n pass", instance_attributes=[], ) @@ -99,7 +99,7 @@ def migrate_move_annotation_data_one_to_one_mapping__class() -> ( superclasses=[], is_public=True, reexported_by=[], - documentation=ClassDocumentation(), + docstring=ClassDocstring(), code="class NewMoveTestClass:\n pass", instance_attributes=[], ) @@ -140,7 +140,7 @@ def migrate_move_annotation_data_one_to_many_mapping() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) @@ -152,7 +152,7 @@ def migrate_move_annotation_data_one_to_many_mapping() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) @@ -164,7 +164,7 @@ def migrate_move_annotation_data_one_to_many_mapping() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) @@ -212,7 +212,7 @@ def migrate_move_annotation_data_one_to_one_mapping_duplicated() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) functionv1_2 = Function( @@ -223,7 +223,7 @@ def migrate_move_annotation_data_one_to_one_mapping_duplicated() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) @@ -235,7 +235,7 @@ def migrate_move_annotation_data_one_to_one_mapping_duplicated() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) diff --git a/tests/migration/annotations/remove_migration.py b/tests/migration/annotations/remove_migration.py index 6ee974c7..251aa9c9 100644 --- 
a/tests/migration/annotations/remove_migration.py +++ b/tests/migration/annotations/remove_migration.py @@ -6,9 +6,9 @@ ) from library_analyzer.processing.api.model import ( Class, - ClassDocumentation, + ClassDocstring, Function, - FunctionDocumentation, + FunctionDocstring, ) from library_analyzer.processing.migration.annotations import ( get_migration_text, @@ -37,7 +37,7 @@ def migrate_remove_annotation_data_one_to_one_mapping() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) @@ -49,7 +49,7 @@ def migrate_remove_annotation_data_one_to_one_mapping() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) @@ -86,7 +86,7 @@ def migrate_remove_annotation_data_one_to_many_mapping() -> ( superclasses=[], is_public=True, reexported_by=[], - documentation=ClassDocumentation(), + docstring=ClassDocstring(), code="class RemoveTestClass:\n pass", instance_attributes=[], ) @@ -97,7 +97,7 @@ def migrate_remove_annotation_data_one_to_many_mapping() -> ( superclasses=[], is_public=True, reexported_by=[], - documentation=ClassDocumentation(), + docstring=ClassDocstring(), code="class NewRemoveTestClass:\n pass", instance_attributes=[], ) @@ -108,7 +108,7 @@ def migrate_remove_annotation_data_one_to_many_mapping() -> ( superclasses=[], is_public=True, reexported_by=[], - documentation=ClassDocumentation(), + docstring=ClassDocstring(), code="class NewRemoveTestClass2:\n pass", instance_attributes=[], ) @@ -120,7 +120,7 @@ def migrate_remove_annotation_data_one_to_many_mapping() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) @@ -173,7 +173,7 @@ def migrate_remove_annotation_data_duplicated() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) functionv1_2 = Function( @@ -184,7 +184,7 @@ def migrate_remove_annotation_data_duplicated() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) @@ -196,7 +196,7 @@ def migrate_remove_annotation_data_duplicated() -> ( results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) diff --git a/tests/migration/annotations/rename_migration.py b/tests/migration/annotations/rename_migration.py index d64e4329..fed384c3 100644 --- a/tests/migration/annotations/rename_migration.py +++ b/tests/migration/annotations/rename_migration.py @@ -6,10 +6,10 @@ ) from library_analyzer.processing.api.model import ( Class, - ClassDocumentation, + ClassDocstring, Parameter, ParameterAssignment, - ParameterDocumentation, + ParameterDocstring, ) from library_analyzer.processing.migration.annotations import ( get_migration_text, @@ -31,26 +31,26 @@ def migrate_rename_annotation_data_one_to_one_mapping() -> ( ] ): parameterv1 = Parameter( - id_="test/test.rename.test1.Test_", - name="Test", - qname="test.rename.test1.Test_", + id_="test/test.rename/Test1/test", + name="test", + qname="test.rename.Test1.test", default_value=None, assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("", "", ""), + docstring=ParameterDocstring("", "", ""), ) parameterv2 = Parameter( - id_="test/test.rename.test1.TestB", - name="TestB", - qname="test.rename.test1.TestB", + 
id_="test/test.rename/Test1/test", + name="test", + qname="test.rename.Test1.test", default_value=None, assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("", "", ""), + docstring=ParameterDocstring("", "", ""), ) mappings = OneToOneMapping(1.0, parameterv1, parameterv2) annotationv1 = RenameAnnotation( - target="test/test.rename.test1.Test_", + target="test/test.rename/Test1/test", authors=["testauthor"], reviewers=[], comment="", @@ -58,7 +58,7 @@ def migrate_rename_annotation_data_one_to_one_mapping() -> ( newName="TestE", ) annotationv2 = RenameAnnotation( - target="test/test.rename.test1.TestB", + target="test/test.rename/Test1/test", authors=["testauthor", migration_author], reviewers=[], comment="", @@ -76,31 +76,31 @@ def migrate_rename_annotation_data_one_to_many_mapping() -> ( ] ): parameterv1 = Parameter( - id_="test/test.rename.test3.Test", - name="Test", - qname="test.rename.test3.Test", + id_="test/test.rename/Test3/test", + name="test", + qname="test.rename/Test3/test", default_value=None, assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("", "", ""), + docstring=ParameterDocstring("", "", ""), ) parameterv2_a = Parameter( - id_="test/test.rename.test3.TestA", - name="TestA", - qname="test.rename.test3.TestA", + id_="test/test.rename/Test3/testA", + name="testA", + qname="test.rename.Test3.testA", default_value=None, assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("", "", ""), + docstring=ParameterDocstring("", "", ""), ) parameterv2_b = Parameter( - id_="test/test.rename.test3.Test", - name="Test", - qname="test.rename.test3.Test", + id_="test/test.rename/Test3/test", + name="test", + qname="test.rename.Test3.test", default_value=None, assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("", "", ""), + docstring=ParameterDocstring("", "", ""), ) classv2 = Class( id="test/test.rename.test3/NewClass", @@ -109,13 +109,13 @@ def migrate_rename_annotation_data_one_to_many_mapping() -> ( superclasses=[], is_public=True, reexported_by=[], - documentation=ClassDocumentation(), + docstring=ClassDocstring(), code="class NewClass:\n pass", instance_attributes=[], ) mappings = OneToManyMapping(1.0, parameterv1, [parameterv2_a, parameterv2_b, classv2]) annotationv1 = RenameAnnotation( - target="test/test.rename.test3.Test", + target="test/test.rename/Test3/test", authors=["testauthor"], reviewers=[], comment="", @@ -123,7 +123,7 @@ def migrate_rename_annotation_data_one_to_many_mapping() -> ( newName="TestZ", ) annotationv2_a = RenameAnnotation( - target="test/test.rename.test3.TestA", + target="test/test.rename/Test3/testA", authors=["testauthor", migration_author], reviewers=[], comment=get_migration_text(annotationv1, mappings), @@ -131,7 +131,7 @@ def migrate_rename_annotation_data_one_to_many_mapping() -> ( newName="TestZ", ) annotationv2_b = RenameAnnotation( - target="test/test.rename.test3.Test", + target="test/test.rename/Test3/test", authors=["testauthor", migration_author], reviewers=[], comment="", @@ -162,34 +162,34 @@ def migrate_rename_annotation_data_duplicated() -> ( ): parameterv1 = Parameter( id_="test/test.rename.duplicate.Test_", - name="Test", + name="Test_", qname="test.rename.duplicate.Test_", default_value=None, assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("", "", ""), + 
docstring=ParameterDocstring("", "", ""), ) parameterv1_2 = Parameter( - id_="test/test.rename.duplicate.Test_2", - name="Test", - qname="test.rename.duplicate.Test_2", + id_="test/test.rename.duplicate.a/Test_", + name="Test_", + qname="test.rename.duplicate.a/Test_", default_value=None, assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("", "", ""), + docstring=ParameterDocstring("", "", ""), ) parameterv2 = Parameter( - id_="test/test.rename.duplicate.TestB", - name="TestB", - qname="test.rename.duplicate.TestB", + id_="test/test.rename.duplicate.b/Test_", + name="Test_", + qname="test.rename.duplicate.b/Test_", default_value=None, assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("", "", ""), + docstring=ParameterDocstring("", "", ""), ) - mappings = ManyToOneMapping(1.0, [parameterv1, parameterv1_2], parameterv2) + mapping = ManyToOneMapping(1.0, [parameterv1, parameterv1_2], parameterv2) annotationv1 = RenameAnnotation( - target="test/test.rename.duplicate.Test_", + target="test/test.rename.duplicate/Test_", authors=["testauthor"], reviewers=[], comment="", @@ -197,7 +197,7 @@ def migrate_rename_annotation_data_duplicated() -> ( newName="TestE", ) annotationv1_2 = RenameAnnotation( - target="test/test.rename.duplicate.Test_2", + target="test/test.rename.duplicate.a/Test_", authors=["testauthor"], reviewers=[], comment="", @@ -205,11 +205,11 @@ def migrate_rename_annotation_data_duplicated() -> ( newName="TestE", ) annotationv2 = RenameAnnotation( - target="test/test.rename.duplicate.TestB", + target="test/test.rename.duplicate.b/Test_", authors=["testauthor", migration_author], reviewers=[], comment="", reviewResult=EnumReviewResult.NONE, newName="TestE", ) - return mappings, [annotationv1, annotationv1_2], [annotationv2] + return mapping, [annotationv1, annotationv1_2], [annotationv2] diff --git a/tests/migration/annotations/todo_migration.py b/tests/migration/annotations/todo_migration.py index 477bf9ae..2fa2108d 100644 --- a/tests/migration/annotations/todo_migration.py +++ b/tests/migration/annotations/todo_migration.py @@ -5,10 +5,10 @@ ) from library_analyzer.processing.api.model import ( Class, - ClassDocumentation, + ClassDocstring, Parameter, ParameterAssignment, - ParameterDocumentation, + ParameterDocstring, ) from library_analyzer.processing.migration.annotations import ( get_migration_text, @@ -31,7 +31,7 @@ def migrate_todo_annotation_data_one_to_one_mapping() -> tuple[Mapping, Abstract default_value=None, assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "", ""), + docstring=ParameterDocstring("str", "", ""), ) parameterv2 = Parameter( id_="test/test.todo.test1.Test", @@ -40,7 +40,7 @@ def migrate_todo_annotation_data_one_to_one_mapping() -> tuple[Mapping, Abstract default_value=None, assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "", ""), + docstring=ParameterDocstring("str", "", ""), ) mappings = OneToOneMapping(1.0, parameterv1, parameterv2) annotationsv1 = TodoAnnotation( @@ -76,7 +76,7 @@ def migrate_todo_annotation_data_one_to_many_mapping() -> ( default_value=None, assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "", ""), + docstring=ParameterDocstring("str", "", ""), ) parameterv2_a = Parameter( id_="test/test.todo.test2.TestA", @@ -85,7 +85,7 @@ def 
migrate_todo_annotation_data_one_to_many_mapping() -> ( default_value=None, assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "", ""), + docstring=ParameterDocstring("str", "", ""), ) parameterv2_b = Parameter( id_="test/test.todo.test2.TestB", @@ -94,7 +94,7 @@ def migrate_todo_annotation_data_one_to_many_mapping() -> ( default_value=None, assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "", ""), + docstring=ParameterDocstring("str", "", ""), ) mappings = OneToManyMapping(1.0, parameterv1, [parameterv2_a, parameterv2_b]) annotationsv1 = TodoAnnotation( @@ -132,7 +132,7 @@ def migrate_todo_annotation_data_many_to_many_mapping() -> tuple[Mapping, Abstra default_value=None, assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "", ""), + docstring=ParameterDocstring("str", "", ""), ) parameterv1_b = Parameter( id_="test/test.todo.test3.TestB", @@ -141,7 +141,7 @@ def migrate_todo_annotation_data_many_to_many_mapping() -> tuple[Mapping, Abstra default_value=None, assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "", ""), + docstring=ParameterDocstring("str", "", ""), ) parameterv2_a = Parameter( id_="test/test.todo.test3.NewTestA", @@ -150,7 +150,7 @@ def migrate_todo_annotation_data_many_to_many_mapping() -> tuple[Mapping, Abstra default_value=None, assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "", ""), + docstring=ParameterDocstring("str", "", ""), ) parameterv2_b = Parameter( id_="test/test.todo.test3.NewTestB", @@ -159,7 +159,7 @@ def migrate_todo_annotation_data_many_to_many_mapping() -> tuple[Mapping, Abstra default_value=None, assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "", ""), + docstring=ParameterDocstring("str", "", ""), ) classv2 = Class( id="test/test.todo.test3.TestTodoClass", @@ -168,7 +168,7 @@ def migrate_todo_annotation_data_many_to_many_mapping() -> tuple[Mapping, Abstra superclasses=[], is_public=True, reexported_by=[], - documentation=ClassDocumentation(), + docstring=ClassDocstring(), code="class TestTodoClass:\n pass", instance_attributes=[], ) @@ -202,7 +202,7 @@ def migrate_todo_annotation_data_many_to_many_mapping() -> tuple[Mapping, Abstra authors=["testauthor", migration_author], reviewers=[], comment="", - reviewResult=EnumReviewResult.NONE, + reviewResult=EnumReviewResult.UNSURE, newTodo=get_migration_text(annotationv1, mappings, for_todo_annotation=True), ) return ( @@ -220,7 +220,7 @@ def migrate_todo_annotation_data_duplicated() -> tuple[Mapping, list[AbstractAnn default_value=None, assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "", ""), + docstring=ParameterDocstring("str", "", ""), ) parameterv1_2 = Parameter( id_="test/test.todo.duplicate.Test_2", @@ -229,7 +229,7 @@ def migrate_todo_annotation_data_duplicated() -> tuple[Mapping, list[AbstractAnn default_value=None, assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "", ""), + docstring=ParameterDocstring("str", "", ""), ) parameterv2 = Parameter( id_="test/test.todo.duplicate.Test", @@ -238,7 +238,7 @@ def migrate_todo_annotation_data_duplicated() -> tuple[Mapping, list[AbstractAnn default_value=None, 
assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "", ""), + docstring=ParameterDocstring("str", "", ""), ) mappings = ManyToOneMapping(1.0, [parameterv1, parameterv1_2], parameterv2) annotationsv1 = TodoAnnotation( diff --git a/tests/migration/annotations/value_migration.py b/tests/migration/annotations/value_migration.py index 37027da6..fbec3c19 100644 --- a/tests/migration/annotations/value_migration.py +++ b/tests/migration/annotations/value_migration.py @@ -13,7 +13,7 @@ NamedType, Parameter, ParameterAssignment, - ParameterDocumentation, + ParameterDocstring, ) from library_analyzer.processing.migration.annotations import ( get_migration_text, @@ -41,7 +41,7 @@ def migrate_constant_annotation_data_one_to_one_mapping() -> ( default_value="'this is a string'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "this is a string", ""), + docstring=ParameterDocstring("str", "this is a string", ""), ) parameterv2 = Parameter( id_="test/test.value.test1.testB", @@ -50,7 +50,7 @@ def migrate_constant_annotation_data_one_to_one_mapping() -> ( default_value="'test string'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "'test string'", ""), + docstring=ParameterDocstring("str", "'test string'", ""), ) mapping = OneToOneMapping(1.0, parameterv1, parameterv2) annotation = ConstantAnnotation( @@ -88,7 +88,7 @@ def migrate_omitted_annotation_data_one_to_one_mapping() -> ( default_value="True", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("bool", "True", ""), + docstring=ParameterDocstring("bool", "True", ""), ) parameterv2 = Parameter( id_="test/test.value.test2.testB", @@ -97,7 +97,7 @@ def migrate_omitted_annotation_data_one_to_one_mapping() -> ( default_value="True", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("bool", "True", ""), + docstring=ParameterDocstring("bool", "True", ""), ) annotation = OmittedAnnotation( target="test/test.value.test2.testA", @@ -130,7 +130,7 @@ def migrate_optional_annotation_data_one_to_one_mapping() -> ( default_value="True", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("bool", "True", ""), + docstring=ParameterDocstring("bool", "True", ""), ) parameterv2 = Parameter( id_="test/test.value.test3.testB", @@ -139,7 +139,7 @@ def migrate_optional_annotation_data_one_to_one_mapping() -> ( default_value="False", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("bool", "False", ""), + docstring=ParameterDocstring("bool", "False", ""), ) annotation = OptionalAnnotation( target="test/test.value.test3.testA", @@ -176,7 +176,7 @@ def migrate_required_annotation_data_one_to_one_mapping() -> ( default_value="'test'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "'test'", ""), + docstring=ParameterDocstring("str", "'test'", ""), ) parameterv2 = Parameter( id_="test/test.value.test4.testB", @@ -185,7 +185,7 @@ def migrate_required_annotation_data_one_to_one_mapping() -> ( default_value="'test_string'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "'test_string'", ""), + docstring=ParameterDocstring("str", "'test_string'", ""), ) 
annotation = RequiredAnnotation( target="test/test.value.test4.testA", @@ -218,7 +218,7 @@ def migrate_constant_annotation_data_one_to_many_mapping() -> ( default_value="2.0", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("float", "2.0", ""), + docstring=ParameterDocstring("float", "2.0", ""), ) parameterv2_a = Parameter( @@ -228,7 +228,7 @@ def migrate_constant_annotation_data_one_to_many_mapping() -> ( default_value="5", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("int", "5", "int in the range of (0, 10)"), + docstring=ParameterDocstring("int", "5", "int in the range of (0, 10)"), ) parameterv2_b = Parameter( id_="test/test.value.test5.testB", @@ -237,7 +237,7 @@ def migrate_constant_annotation_data_one_to_many_mapping() -> ( default_value=None, assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("", "", ""), + docstring=ParameterDocstring("", "", ""), ) parameterv2_c = Parameter( id_="test/test.value.test5.testC", @@ -246,7 +246,7 @@ def migrate_constant_annotation_data_one_to_many_mapping() -> ( default_value="test_value", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "'test_string'", ""), + docstring=ParameterDocstring("str", "'test_string'", ""), ) parameterv2_d = Parameter( id_="test/test.value.test5.testD", @@ -255,9 +255,9 @@ def migrate_constant_annotation_data_one_to_many_mapping() -> ( default_value="3.0", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("float", "3.0", ""), + docstring=ParameterDocstring("float", "3.0", ""), ) - attribute = Attribute("test_attribute", NamedType("str")) + attribute = Attribute("test_attribute", "test_attribute", NamedType("str")) mapping = OneToManyMapping( 1.0, @@ -329,7 +329,7 @@ def migrate_optional_annotation_data_one_to_many_mapping() -> ( default_value="2", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("int", "2", ""), + docstring=ParameterDocstring("int", "2", ""), ) parameterv2_a = Parameter( @@ -339,7 +339,7 @@ def migrate_optional_annotation_data_one_to_many_mapping() -> ( default_value="5", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("float", "5.0", "float"), + docstring=ParameterDocstring("float", "5.0", "float"), ) parameterv2_b = Parameter( id_="test/test.value.test6.testB", @@ -348,7 +348,7 @@ def migrate_optional_annotation_data_one_to_many_mapping() -> ( default_value=None, assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("", "", ""), + docstring=ParameterDocstring("", "", ""), ) parameterv2_c = Parameter( id_="test/test.value.test6.testC", @@ -357,7 +357,7 @@ def migrate_optional_annotation_data_one_to_many_mapping() -> ( default_value="test_value", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "test_string", ""), + docstring=ParameterDocstring("str", "test_string", ""), ) parameterv2_d = Parameter( id_="test/test.value.test6.testD", @@ -366,7 +366,7 @@ def migrate_optional_annotation_data_one_to_many_mapping() -> ( default_value="5", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("int", "5", "int in the range of (0, 10)"), + 
docstring=ParameterDocstring("int", "5", "int in the range of (0, 10)"), ) mapping = OneToManyMapping(1.0, parameterv1, [parameterv2_a, parameterv2_b, parameterv2_c, parameterv2_d]) @@ -436,7 +436,7 @@ def migrate_required_annotation_data_one_to_many_mapping() -> ( default_value="1.0", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("float", "1.0", ""), + docstring=ParameterDocstring("float", "1.0", ""), ) parameterv2_a = Parameter( id_="test/test.value.test7.testA", @@ -445,7 +445,7 @@ def migrate_required_annotation_data_one_to_many_mapping() -> ( default_value="2", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("int", "2", ""), + docstring=ParameterDocstring("int", "2", ""), ) parameterv2_b = Parameter( @@ -455,7 +455,7 @@ def migrate_required_annotation_data_one_to_many_mapping() -> ( default_value="2.0", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("float", "2.0", ""), + docstring=ParameterDocstring("float", "2.0", ""), ) parameterv2_c = Parameter( id_="test/test.value.test7.testC", @@ -464,7 +464,7 @@ def migrate_required_annotation_data_one_to_many_mapping() -> ( default_value='"value"', assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("string", '"value"', ""), + docstring=ParameterDocstring("string", '"value"', ""), ) parameterv2_d = Parameter( id_="test/test.value.test7.testD", @@ -473,7 +473,7 @@ def migrate_required_annotation_data_one_to_many_mapping() -> ( default_value=None, assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("", "", ""), + docstring=ParameterDocstring("", "", ""), ) parameterv2_e = Parameter( id_="test/test.value.test7.testE", @@ -482,7 +482,7 @@ def migrate_required_annotation_data_one_to_many_mapping() -> ( default_value=None, assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("", "", ""), + docstring=ParameterDocstring("", "", ""), ) parameterv2_f = Parameter( id_="test/test.value.test7.testF", @@ -491,7 +491,7 @@ def migrate_required_annotation_data_one_to_many_mapping() -> ( default_value="3.0", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("float", "3.0", ""), + docstring=ParameterDocstring("float", "3.0", ""), ) mapping = OneToManyMapping( @@ -591,7 +591,7 @@ def migrate_omitted_annotation_data_one_to_many_mapping() -> ( default_value="1", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("int", "1", ""), + docstring=ParameterDocstring("int", "1", ""), ) parameterv2_a = Parameter( id_="test/test.value.test8.testA", @@ -600,7 +600,7 @@ def migrate_omitted_annotation_data_one_to_many_mapping() -> ( default_value="2", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("int", "2", ""), + docstring=ParameterDocstring("int", "2", ""), ) parameterv2_b = Parameter( @@ -610,7 +610,7 @@ def migrate_omitted_annotation_data_one_to_many_mapping() -> ( default_value="2.0", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("float", "2.0", ""), + docstring=ParameterDocstring("float", "2.0", ""), ) parameterv2_c = Parameter( id_="test/test.value.test8.testC", @@ -619,7 +619,7 @@ def 
migrate_omitted_annotation_data_one_to_many_mapping() -> ( default_value='"value"', assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("string", '"value"', ""), + docstring=ParameterDocstring("string", '"value"', ""), ) parameterv2_d = Parameter( id_="test/test.value.test8.testD", @@ -628,7 +628,7 @@ def migrate_omitted_annotation_data_one_to_many_mapping() -> ( default_value="None", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("", "None", ""), + docstring=ParameterDocstring("", "None", ""), ) parameterv2_e = Parameter( id_="test/test.value.test8.testE", @@ -637,7 +637,7 @@ def migrate_omitted_annotation_data_one_to_many_mapping() -> ( default_value=None, assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("", "", ""), + docstring=ParameterDocstring("", "", ""), ) mapping = OneToManyMapping( @@ -721,7 +721,7 @@ def migrate_constant_annotation_data_duplicated() -> ( default_value="'this is a string'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "this is a string", ""), + docstring=ParameterDocstring("str", "this is a string", ""), ) parameterv1_2 = Parameter( id_="test/test.value.duplicate.testA_2", @@ -730,7 +730,7 @@ def migrate_constant_annotation_data_duplicated() -> ( default_value="'this is a string'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "this is a string", ""), + docstring=ParameterDocstring("str", "this is a string", ""), ) parameterv2 = Parameter( id_="test/test.value.duplicate.testB", @@ -739,7 +739,7 @@ def migrate_constant_annotation_data_duplicated() -> ( default_value="'test string'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "'test string'", ""), + docstring=ParameterDocstring("str", "'test string'", ""), ) mapping = ManyToOneMapping(1.0, [parameterv1, parameterv1_2], parameterv2) annotation = ConstantAnnotation( @@ -786,7 +786,7 @@ def migrate_omitted_annotation_data_duplicated() -> ( default_value="True", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("bool", "True", ""), + docstring=ParameterDocstring("bool", "True", ""), ) parameterv1_2 = Parameter( id_="test/test.value.duplicate2.testA_2", @@ -795,7 +795,7 @@ def migrate_omitted_annotation_data_duplicated() -> ( default_value="True", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("bool", "True", ""), + docstring=ParameterDocstring("bool", "True", ""), ) parameterv2 = Parameter( id_="test/test.value.duplicate2.testB", @@ -804,7 +804,7 @@ def migrate_omitted_annotation_data_duplicated() -> ( default_value="True", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("bool", "True", ""), + docstring=ParameterDocstring("bool", "True", ""), ) annotation = OmittedAnnotation( target="test/test.value.duplicate2.testA", @@ -848,7 +848,7 @@ def migrate_optional_annotation_data_duplicated() -> ( default_value="True", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("bool", "True", ""), + docstring=ParameterDocstring("bool", "True", ""), ) parameterv1_2 = Parameter( id_="test/test.value.duplicate3.testA_2", @@ -857,7 +857,7 @@ def 
migrate_optional_annotation_data_duplicated() -> ( default_value="True", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("bool", "True", ""), + docstring=ParameterDocstring("bool", "True", ""), ) parameterv2 = Parameter( id_="test/test.value.duplicate3.testB", @@ -866,7 +866,7 @@ def migrate_optional_annotation_data_duplicated() -> ( default_value="False", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("bool", "False", ""), + docstring=ParameterDocstring("bool", "False", ""), ) annotation = OptionalAnnotation( target="test/test.value.duplicate3.testA", @@ -916,7 +916,7 @@ def migrate_required_annotation_data_duplicated() -> ( default_value="'test'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "'test'", ""), + docstring=ParameterDocstring("str", "'test'", ""), ) parameterv1_2 = Parameter( id_="test/test.value.duplicate4.testA_2", @@ -925,7 +925,7 @@ def migrate_required_annotation_data_duplicated() -> ( default_value="'test'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "'test'", ""), + docstring=ParameterDocstring("str", "'test'", ""), ) parameterv2 = Parameter( id_="test/test.value.duplicate4.testB", @@ -934,7 +934,7 @@ def migrate_required_annotation_data_duplicated() -> ( default_value="'test_string'", assigned_by=ParameterAssignment.POSITION_OR_NAME, is_public=True, - documentation=ParameterDocumentation("str", "'test_string'", ""), + docstring=ParameterDocstring("str", "'test_string'", ""), ) annotation = RequiredAnnotation( target="test/test.value.duplicate4.testA", diff --git a/tests/migration/test_migration.py b/tests/migration/test_migration.py index 70e0b7e0..065db184 100644 --- a/tests/migration/test_migration.py +++ b/tests/migration/test_migration.py @@ -13,9 +13,9 @@ from library_analyzer.processing.api.model import ( API, Class, - ClassDocumentation, + ClassDocstring, Function, - FunctionDocumentation, + FunctionDocstring, ) from library_analyzer.processing.migration import Migration from library_analyzer.processing.migration.annotations._migrate_move_annotation import ( @@ -199,7 +199,7 @@ def test_migrate_all_annotations() -> None: migration = Migration(annotation_store, mappings) migration.migrate_annotations() - unsure_migrated_annotations = migration.unsure_migrated_annotation_store.to_json() + unsure_migrated_annotations = migration.unsure_migrated_annotation_store.to_dict() assert len(unsure_migrated_annotations["todoAnnotations"]) == 3 migration.migrated_annotation_store.todoAnnotations.extend( migration.unsure_migrated_annotation_store.todoAnnotations, @@ -229,15 +229,15 @@ def test_migrate_command_and_both_annotation_stores() -> None: encoding="utf-8", ) as unsure_annotationsv2_file: apiv1_json = json.load(apiv1_file) - apiv1 = API.from_json(apiv1_json) + apiv1 = API.from_dict(apiv1_json) apiv2_json = json.load(apiv2_file) - apiv2 = API.from_json(apiv2_json) + apiv2 = API.from_dict(apiv2_json) annotationsv1_json = json.load(annotationsv1_file) - annotationsv1 = AnnotationStore.from_json(annotationsv1_json) + annotationsv1 = AnnotationStore.from_dict(annotationsv1_json) expected_annotationsv2_json = json.load(annotationsv2_file) - annotationsv2 = AnnotationStore.from_json(expected_annotationsv2_json) + annotationsv2 = AnnotationStore.from_dict(expected_annotationsv2_json) expected_unsure_annotationsv2_json = 
json.load(unsure_annotationsv2_file) - unsure_annotationsv2 = AnnotationStore.from_json(expected_unsure_annotationsv2_json) + unsure_annotationsv2 = AnnotationStore.from_dict(expected_unsure_annotationsv2_json) differ = SimpleDiffer(None, [], apiv1, apiv2) api_mapping = APIMapping(apiv1, apiv2, differ, threshold_of_similarity_between_mappings=0.3) @@ -314,7 +314,7 @@ def test_handle_duplicates() -> None: superclasses=[], is_public=True, reexported_by=[], - documentation=ClassDocumentation(), + docstring=ClassDocstring(), code="", instance_attributes=[], ) @@ -343,10 +343,12 @@ def test_handle_duplicates() -> None: migration.migrate_annotations() store = AnnotationStore() store.add_annotation( - TodoAnnotation.from_json( + TodoAnnotation.from_dict( { "authors": ["", "migration"], - "comment": "Conflicting Attribute during migration: {'newTodo': 'lightbringer'}, {'newTodo': 'todo'}", + "comment": ( + "Conflicting attribute found during migration: {'newTodo': 'lightbringer'}, {'newTodo': 'todo'}" + ), "newTodo": "darkage", "reviewResult": "unsure", "reviewers": [""], @@ -354,11 +356,11 @@ def test_handle_duplicates() -> None: }, ), ) - migrated_annotation_store = migration.migrated_annotation_store.to_json() + migrated_annotation_store = migration.migrated_annotation_store.to_dict() todo_annotations = migrated_annotation_store.pop("todoAnnotations") migrated_annotation_store["todoAnnotations"] = {} assert ( - migrated_annotation_store == migration.unsure_migrated_annotation_store.to_json() == AnnotationStore().to_json() + migrated_annotation_store == migration.unsure_migrated_annotation_store.to_dict() == AnnotationStore().to_dict() ) assert len(todo_annotations) == 1 todo_values = ["darkage", "lightbringer", "todo"] @@ -366,11 +368,13 @@ def test_handle_duplicates() -> None: todo_values.remove(todo_annotations[classv2.id].pop("newTodo")) assert todo_annotations[classv2.id] == { "authors": ["", "migration"], - "comment": "Conflicting Attribute during migration: {'newTodo': '" - + todo_values[0] - + "'}, {'newTodo': '" - + todo_values[1] - + "'}", + "comment": ( + "Conflicting attribute found during migration: {'newTodo': '" + + todo_values[0] + + "'}, {'newTodo': '" + + todo_values[1] + + "'}" + ), "reviewResult": "unsure", "reviewers": [""], "target": "test/test.duplicate/TestClass", @@ -395,7 +399,7 @@ def test_was_moved() -> None: results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ) assert _was_moved(function, function, move_annotation) is False @@ -410,7 +414,7 @@ def test_was_moved() -> None: results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ), move_annotation, @@ -428,7 +432,7 @@ def test_was_moved() -> None: results=[], is_public=True, reexported_by=[], - documentation=FunctionDocumentation(), + docstring=FunctionDocstring(), code="", ), move_annotation,
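Taken together, the hunks above apply one mechanical rename across the test fixtures: the documentation model classes in `library_analyzer.processing.api.model` become `ClassDocstring`, `FunctionDocstring`, and `ParameterDocstring`, the matching constructor keyword changes from `documentation=` to `docstring=`, and the serialization helpers on `API`, `AnnotationStore`, and the annotation classes change from `to_json`/`from_json` to `to_dict`/`from_dict`. Below is a minimal sketch of the new spelling, assuming the constructor signature and the apparent positional order (type, default value, description) shown in the fixtures; every id, name, and value in it is invented for illustration:

```python
# Sketch of the renamed docstring model, mirroring the fixture constructors
# above. All ids, names, and values here are invented for illustration.
from library_analyzer.processing.api.model import (
    Parameter,
    ParameterAssignment,
    ParameterDocstring,
)

parameter = Parameter(
    id_="example/example.module/ExampleClass/method/param",
    name="param",
    qname="example.module.ExampleClass.method.param",
    default_value="'value'",
    assigned_by=ParameterAssignment.POSITION_OR_NAME,
    is_public=True,
    # Previously: documentation=ParameterDocumentation("str", "'value'", "...")
    docstring=ParameterDocstring("str", "'value'", "an example parameter"),
)
```

Fixture code that still passed `documentation=` after this change would presumably fail with an unexpected-keyword `TypeError`, which is why the rename is applied wholesale across every test file rather than per test.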