From 4d3955f6c11b7ce4bb289002a61091abc26d5a2e Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Sat, 9 Dec 2023 18:16:01 -0800 Subject: [PATCH 001/112] python package, cli, and old ml model spec validation --- .gitignore | 3 +- stac_model/.dockerignore | 38 + stac_model/.editorconfig | 24 + stac_model/.github/.stale.yml | 17 + .../.github/ISSUE_TEMPLATE/bug_report.md | 42 + stac_model/.github/ISSUE_TEMPLATE/config.yml | 3 + .../.github/ISSUE_TEMPLATE/feature_request.md | 23 + stac_model/.github/ISSUE_TEMPLATE/question.md | 28 + stac_model/.github/PULL_REQUEST_TEMPLATE.md | 31 + stac_model/.github/dependabot.yml | 35 + stac_model/.github/release-drafter.yml | 28 + stac_model/.github/workflows/build.yml | 44 + stac_model/.github/workflows/greetings.yml | 16 + .../.github/workflows/release-drafter.yml | 16 + stac_model/.gitignore | 1032 +++++++++++++ stac_model/.pre-commit-config.yaml | 13 + stac_model/AUTHORS.md | 9 + stac_model/CHANGELOG.md | 0 stac_model/CONTRIBUTING.md | 96 ++ stac_model/LICENSE | 207 +++ stac_model/Makefile | 94 ++ stac_model/README.md | 161 ++ stac_model/SECURITY.md | 29 + stac_model/docker/Dockerfile | 25 + stac_model/docker/README.md | 47 + stac_model/docs/.gitkeep | 0 stac_model/example.json | 53 + stac_model/model_metadata.py | 102 ++ stac_model/poetry.lock | 1355 +++++++++++++++++ stac_model/pyproject.toml | 201 +++ stac_model/requirements.txt | 12 + stac_model/stac_model/__init__.py | 8 + stac_model/stac_model/__main__.py | 69 + stac_model/stac_model/schema.py | 192 +++ stac_model/tests/test_schema.py | 68 + 35 files changed, 4120 insertions(+), 1 deletion(-) create mode 100644 stac_model/.dockerignore create mode 100644 stac_model/.editorconfig create mode 100644 stac_model/.github/.stale.yml create mode 100644 stac_model/.github/ISSUE_TEMPLATE/bug_report.md create mode 100644 stac_model/.github/ISSUE_TEMPLATE/config.yml create mode 100644 stac_model/.github/ISSUE_TEMPLATE/feature_request.md create mode 100644 
stac_model/.github/ISSUE_TEMPLATE/question.md create mode 100644 stac_model/.github/PULL_REQUEST_TEMPLATE.md create mode 100644 stac_model/.github/dependabot.yml create mode 100644 stac_model/.github/release-drafter.yml create mode 100644 stac_model/.github/workflows/build.yml create mode 100644 stac_model/.github/workflows/greetings.yml create mode 100644 stac_model/.github/workflows/release-drafter.yml create mode 100644 stac_model/.gitignore create mode 100644 stac_model/.pre-commit-config.yaml create mode 100644 stac_model/AUTHORS.md create mode 100644 stac_model/CHANGELOG.md create mode 100644 stac_model/CONTRIBUTING.md create mode 100644 stac_model/LICENSE create mode 100644 stac_model/Makefile create mode 100644 stac_model/README.md create mode 100644 stac_model/SECURITY.md create mode 100644 stac_model/docker/Dockerfile create mode 100644 stac_model/docker/README.md create mode 100644 stac_model/docs/.gitkeep create mode 100644 stac_model/example.json create mode 100644 stac_model/model_metadata.py create mode 100644 stac_model/poetry.lock create mode 100644 stac_model/pyproject.toml create mode 100644 stac_model/requirements.txt create mode 100644 stac_model/stac_model/__init__.py create mode 100644 stac_model/stac_model/__main__.py create mode 100644 stac_model/stac_model/schema.py create mode 100644 stac_model/tests/test_schema.py diff --git a/.gitignore b/.gitignore index b68addb..654b340 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ /package-lock.json -/node_modules \ No newline at end of file +/node_modules +.vscode diff --git a/stac_model/.dockerignore b/stac_model/.dockerignore new file mode 100644 index 0000000..6be1e24 --- /dev/null +++ b/stac_model/.dockerignore @@ -0,0 +1,38 @@ +# Git +.git +.gitignore +.github + +# Docker +.dockerignore + +# IDE +.idea +.vscode + +# Byte-compiled / optimized / DLL files +__pycache__/ +**/__pycache__/ +*.pyc +*.pyo +*.pyd +.Python +*.py[cod] +*$py.class +.pytest_cache/ +..mypy_cache/ + +# poetry 
+.venv + +# C extensions +*.so + +# Virtual environment +.venv +venv + +.DS_Store +.AppleDouble +.LSOverride +._* diff --git a/stac_model/.editorconfig b/stac_model/.editorconfig new file mode 100644 index 0000000..7f578f1 --- /dev/null +++ b/stac_model/.editorconfig @@ -0,0 +1,24 @@ +# Check http://editorconfig.org for more information +# This is the main config file for this project: +root = true + +[*] +charset = utf-8 +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +[*.{py, pyi}] +indent_style = space +indent_size = 4 + +[Makefile] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +[*.{diff,patch}] +trim_trailing_whitespace = false diff --git a/stac_model/.github/.stale.yml b/stac_model/.github/.stale.yml new file mode 100644 index 0000000..159f419 --- /dev/null +++ b/stac_model/.github/.stale.yml @@ -0,0 +1,17 @@ +# Number of days of inactivity before an issue becomes stale +daysUntilStale: 120 +# Number of days of inactivity before a stale issue is closed +daysUntilClose: 30 +# Issues with these labels will never be considered stale +exemptLabels: + - pinned + - security +# Label to use when marking an issue as stale +staleLabel: stale +# Comment to post when marking an issue as stale. Set to `false` to disable +markComment: > + This issue has been automatically marked as stale because it has not had + recent activity. It will be closed if no further activity occurs. Thank you + for your contributions. +# Comment to post when closing a stale issue. 
Set to `false` to disable +closeComment: false diff --git a/stac_model/.github/ISSUE_TEMPLATE/bug_report.md b/stac_model/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000..996ff26 --- /dev/null +++ b/stac_model/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,42 @@ +--- +name: ":bug: Bug report" +about: If something isn't working 🔧 +title: '' +labels: bug, needs-triage +assignees: +--- + +## :bug: Bug Report + + + +## :microscope: How To Reproduce + +Steps to reproduce the behaviour: + +1. ... + +### Code sample + + + +### Environment + +* OS: [e.g. Linux / Windows / macOS] +* Python version, get it with: + +```bash +python --version +``` + +### Screenshots + + + +## :chart_with_upwards_trend: Expected behavior + + + +## :paperclip: Additional context + + diff --git a/stac_model/.github/ISSUE_TEMPLATE/config.yml b/stac_model/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000..8f2da54 --- /dev/null +++ b/stac_model/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,3 @@ +# Configuration: https://help.github.com/en/github/building-a-strong-community/configuring-issue-templates-for-your-repository + +blank_issues_enabled: false diff --git a/stac_model/.github/ISSUE_TEMPLATE/feature_request.md b/stac_model/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000..41bf0cd --- /dev/null +++ b/stac_model/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,23 @@ +--- +name: ":rocket: Feature request" +about: Suggest an idea for this project 🏖 +title: '' +labels: enhancement, needs-triage +assignees: +--- + +## :rocket: Feature Request + + + +## :sound: Motivation + + + +## :satellite: Alternatives + + + +## :paperclip: Additional context + + diff --git a/stac_model/.github/ISSUE_TEMPLATE/question.md b/stac_model/.github/ISSUE_TEMPLATE/question.md new file mode 100644 index 0000000..6ac7668 --- /dev/null +++ b/stac_model/.github/ISSUE_TEMPLATE/question.md @@ -0,0 +1,28 @@ +--- +name: ":question: Question" +about: 
Ask a question about this project 🎓 +title: '' +labels: question, needs-triage +assignees: +--- + +## Checklist + + + +- [ ] I've searched the project's [`issues`][1], looking for the following terms: + - [...] + +## :question: Question + + + +How can I [...]? + +Is it possible to [...]? + +## :paperclip: Additional context + + + +[1]: https://github.com/rbavery/stac-model/issues diff --git a/stac_model/.github/PULL_REQUEST_TEMPLATE.md b/stac_model/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000..5e3bd6d --- /dev/null +++ b/stac_model/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,31 @@ +## Description + + + +## Related Issue + + + +## Type of Change + + + +- [ ] :books: Examples, docs, tutorials or dependencies update; +- [ ] :wrench: Bug fix (non-breaking change which fixes an issue); +- [ ] :clinking_glasses: Improvement (non-breaking change which improves an existing feature); +- [ ] :rocket: New feature (non-breaking change which adds functionality); +- [ ] :boom: Breaking change (fix or feature that would cause existing functionality to change); +- [ ] :closed_lock_with_key: Security fix. + +## Checklist + + + +- [ ] I've read the [`CODE_OF_CONDUCT.md`][1] document; +- [ ] I've read the [`CONTRIBUTING.md`][2] guide; +- [ ] I've updated the code style using `make codestyle`; +- [ ] I've written tests for all new methods and classes that I created; +- [ ] I've written the docstring in `Google` format for all the methods and classes that I used. 
+ +[1]: https://github.com/rbavery/stac-model/blob/master/CODE_OF_CONDUCT.md +[2]: https://github.com/rbavery/stac-model/blob/master/CONTRIBUTING.md diff --git a/stac_model/.github/dependabot.yml b/stac_model/.github/dependabot.yml new file mode 100644 index 0000000..8f872f1 --- /dev/null +++ b/stac_model/.github/dependabot.yml @@ -0,0 +1,35 @@ +# Configuration: https://dependabot.com/docs/config-file/ +# Docs: https://docs.github.com/en/github/administering-a-repository/keeping-your-dependencies-updated-automatically + +version: 2 + +updates: + - package-ecosystem: "pip" + directory: "/" + schedule: + interval: "monthly" + allow: + - dependency-type: "all" + commit-message: + prefix: ":arrow_up:" + open-pull-requests-limit: 5 + + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "monthly" + allow: + - dependency-type: "all" + commit-message: + prefix: ":arrow_up:" + open-pull-requests-limit: 5 + + - package-ecosystem: "docker" + directory: "/docker" + schedule: + interval: "monthly" + allow: + - dependency-type: "all" + commit-message: + prefix: ":arrow_up:" + open-pull-requests-limit: 5 diff --git a/stac_model/.github/release-drafter.yml b/stac_model/.github/release-drafter.yml new file mode 100644 index 0000000..8ad8b33 --- /dev/null +++ b/stac_model/.github/release-drafter.yml @@ -0,0 +1,28 @@ +# Release drafter configuration https://github.com/release-drafter/release-drafter#configuration +# Emojis were chosen to match the https://gitmoji.dev/ + +name-template: "v$NEXT_PATCH_VERSION" +tag-template: "v$NEXT_PATCH_VERSION" + +categories: + - title: ":rocket: Features" + labels: [enhancement, feature] + - title: ":wrench: Fixes & Refactoring" + labels: [bug, refactoring, bugfix, fix] + - title: ":package: Build System & CI/CD" + labels: [build, ci, testing] + - title: ":boom: Breaking Changes" + labels: [breaking] + - title: ":memo: Documentation" + labels: [documentation] + - title: ":arrow_up: Dependencies updates" + labels: 
[dependencies] + +template: | + ## What's Changed + + $CHANGES + + ## :busts_in_silhouette: List of contributors + + $CONTRIBUTORS diff --git a/stac_model/.github/workflows/build.yml b/stac_model/.github/workflows/build.yml new file mode 100644 index 0000000..1775cfb --- /dev/null +++ b/stac_model/.github/workflows/build.yml @@ -0,0 +1,44 @@ + +# UPDATEME to suit your project's workflow +name: build + +on: [push, pull_request] + +jobs: + build: + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ["3.10", "3.11"] + + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2.2.2 + with: + python-version: ${{ matrix.python-version }} + + - name: Install poetry + run: make poetry-download + + - name: Set up cache + uses: actions/cache@v2.1.6 + with: + path: .venv + key: venv-${{ matrix.python-version }}-${{ hashFiles('pyproject.toml') }}-${{ hashFiles('poetry.lock') }} + - name: Install dependencies + run: | + poetry config virtualenvs.in-project true + poetry install + + - name: Run style checks + run: | + make check-codestyle + + - name: Run tests + run: | + make test + + - name: Run safety checks + run: | + make check-safety diff --git a/stac_model/.github/workflows/greetings.yml b/stac_model/.github/workflows/greetings.yml new file mode 100644 index 0000000..a1f6e89 --- /dev/null +++ b/stac_model/.github/workflows/greetings.yml @@ -0,0 +1,16 @@ +name: Greetings + +on: [pull_request, issues] + +jobs: + greeting: + runs-on: ubuntu-latest + steps: + - uses: actions/first-interaction@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + pr-message: 'Hello @${{ github.actor }}, thank you for submitting a PR! We will respond as soon as possible.' + issue-message: | + Hello @${{ github.actor }}, thank you for your interest in our work! + + If this is a bug report, please provide screenshots and **minimum viable code to reproduce your issue**, otherwise we can not help you. 
diff --git a/stac_model/.github/workflows/release-drafter.yml b/stac_model/.github/workflows/release-drafter.yml new file mode 100644 index 0000000..0c06b2b --- /dev/null +++ b/stac_model/.github/workflows/release-drafter.yml @@ -0,0 +1,16 @@ +name: Release Drafter + +on: + push: + # branches to consider in the event; optional, defaults to all + branches: + - main + +jobs: + update_release_draft: + runs-on: ubuntu-latest + steps: + # Drafts your next Release notes as Pull Requests are merged into "master" + - uses: release-drafter/release-drafter@v5.15.0 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/stac_model/.gitignore b/stac_model/.gitignore new file mode 100644 index 0000000..e4eed7d --- /dev/null +++ b/stac_model/.gitignore @@ -0,0 +1,1032 @@ +### ArchLinuxPackages ### +*.tar +*.tar.* +*.jar +*.exe +*.msi +*.zip +*.tgz +*.log +*.log.* +*.sig + +pkg/ +src/ + +### C ### +# Prerequisites +*.d + +# Object files +*.o +*.ko +*.obj +*.elf + +# Linker output +*.ilk +*.map +*.exp + +# Precompiled Headers +*.gch +*.pch + +# Libraries +*.lib +*.a +*.la +*.lo + +# Shared objects (inc. Windows DLLs) +*.dll +*.so +*.so.* +*.dylib + +# Executables +*.out +*.app +*.i*86 +*.x86_64 +*.hex + +# Debug files +*.dSYM/ +*.su +*.idb +*.pdb + +# Kernel Module Compile Results +*.mod* +*.cmd +.tmp_versions/ +modules.order +Module.symvers +Mkfile.old +dkms.conf + +### certificates ### +*.pem +*.key +*.crt +*.cer +*.der +*.priv + +### Database ### +*.accdb +*.db +*.dbf +*.mdb +*.sqlite3 +*.db-shm +*.db-wal + +### Diff ### +*.patch +*.diff + +### Django ### +*.pot +*.pyc +__pycache__/ +local_settings.py +db.sqlite3 +db.sqlite3-journal +media + +# If your build process includes running collectstatic, then you probably don't need or want to include staticfiles/ +# in your Git repository. Update and uncomment the following line accordingly. 
+# /staticfiles/ + +### Django.Python Stack ### +# Byte-compiled / optimized / DLL files +*.py[cod] +*$py.class + +# C extensions + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo + +# Django stuff: + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. 
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ + +### Git ### +# Created by git for backups. 
To disable backups in Git: +# $ git config --global mergetool.keepBackup false +*.orig + +# Created by git when using merge tools for conflicts +*.BACKUP.* +*.BASE.* +*.LOCAL.* +*.REMOTE.* +*_BACKUP_*.txt +*_BASE_*.txt +*_LOCAL_*.txt +*_REMOTE_*.txt + +### Linux ### +*~ + +# temporary files which can be created if a process still has a handle open of a deleted file +.fuse_hidden* + +# KDE directory preferences +.directory + +# Linux trash folder which might appear on any partition or disk +.Trash-* + +# .nfs files are created when an open file is removed but is still being accessed +.nfs* + +### MicrosoftOffice ### +*.tmp + +# Word temporary +~$*.doc* + +# Word Auto Backup File +Backup of *.doc* + +# Excel temporary +~$*.xls* + +# Excel Backup File +*.xlk + +# PowerPoint temporary +~$*.ppt* + +# Visio autosave temporary files +*.~vsd* + +### OSX ### +# General +.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + +### PyCharm ### +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff +.idea/**/workspace.xml +.idea/**/tasks.xml +.idea/**/usage.statistics.xml +.idea/**/dictionaries +.idea/**/shelf + +# AWS User-specific +.idea/**/aws.xml + +# Generated files +.idea/**/contentModel.xml + +# Sensitive or high-churn files +.idea/**/dataSources/ +.idea/**/dataSources.ids +.idea/**/dataSources.local.xml +.idea/**/sqlDataSources.xml +.idea/**/dynamic.xml +.idea/**/uiDesigner.xml +.idea/**/dbnavigator.xml + +# Gradle +.idea/**/gradle.xml 
+.idea/**/libraries + +# Gradle and Maven with auto-import +# When using Gradle or Maven with auto-import, you should exclude module files, +# since they will be recreated, and may cause churn. Uncomment if using +# auto-import. +# .idea/artifacts +# .idea/compiler.xml +# .idea/jarRepositories.xml +# .idea/modules.xml +# .idea/*.iml +# .idea/modules +# *.iml +# *.ipr + +# CMake +cmake-build-*/ + +# Mongo Explorer plugin +.idea/**/mongoSettings.xml + +# File-based project format +*.iws + +# IntelliJ +out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Cursive Clojure plugin +.idea/replstate.xml + +# SonarLint plugin +.idea/sonarlint/ + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties + +# Editor-based Rest Client +.idea/httpRequests + +# Android studio 3.1+ serialized cache file +.idea/caches/build_file_checksums.ser + +### PyCharm Patch ### +# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721 + +# *.iml +# modules.xml +# .idea/misc.xml +# *.ipr + +# Sonarlint plugin +# https://plugins.jetbrains.com/plugin/7973-sonarlint +.idea/**/sonarlint/ + +# SonarQube Plugin +# https://plugins.jetbrains.com/plugin/7238-sonarqube-community-plugin +.idea/**/sonarIssues.xml + +# Markdown Navigator plugin +# https://plugins.jetbrains.com/plugin/7896-markdown-navigator-enhanced +.idea/**/markdown-navigator.xml +.idea/**/markdown-navigator-enh.xml +.idea/**/markdown-navigator/ + +# Cache file creation bug +# See https://youtrack.jetbrains.com/issue/JBR-2257 +.idea/$CACHE_FILE$ + +# CodeStream plugin +# https://plugins.jetbrains.com/plugin/12206-codestream +.idea/codestream.xml + +# Azure Toolkit for IntelliJ plugin +# https://plugins.jetbrains.com/plugin/8053-azure-toolkit-for-intellij +.idea/**/azureSettings.xml + +### Python ### +# Byte-compiled / optimized / DLL files + +# C 
extensions + +# Distribution / packaging + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. + +# Installer logs + +# Unit test / coverage reports + +# Translations + +# Django stuff: + +# Flask stuff: + +# Scrapy stuff: + +# Sphinx documentation + +# PyBuilder + +# Jupyter Notebook + +# IPython + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm + +# Celery stuff + +# SageMath parsed files + +# Environments + +# Spyder project settings + +# Rope project settings + +# mkdocs documentation + +# mypy + +# Pyre type checker + +# pytype static type analyzer + +# Cython debug symbols + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. + +### Python Patch ### +# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration +poetry.toml + +# ruff +.ruff_cache/ + +# LSP config files +pyrightconfig.json + +### Spreadsheet ### +*.xlr +*.xls +*.xlsx + +### SSH ### +**/.ssh/id_* +**/.ssh/*_id_* +**/.ssh/known_hosts + +### Vim ### +# Swap +[._]*.s[a-v][a-z] +!*.svg # comment out if you don't need vector files +[._]*.sw[a-p] +[._]s[a-rt-v][a-z] +[._]ss[a-gi-z] +[._]sw[a-p] + +# Session +Session.vim +Sessionx.vim + +# Temporary +.netrwhist +# Auto-generated tag files +tags +# Persistent undo +[._]*.un~ + +### VisualStudioCode ### +.vscode/* +!.vscode/settings.json +!.vscode/tasks.json +!.vscode/launch.json +!.vscode/extensions.json +!.vscode/*.code-snippets + +# Local History for Visual Studio Code +.history/ + +# Built Visual Studio Code Extensions +*.vsix + +### VisualStudioCode Patch ### +# Ignore all local history of files +.history +.ionide + +### Windows ### +# Windows thumbnail cache files +Thumbs.db +Thumbs.db:encryptable +ehthumbs.db +ehthumbs_vista.db + +# Dump file +*.stackdump + +# Folder config file +[Dd]esktop.ini + +# Recycle Bin used on file shares +$RECYCLE.BIN/ + +# Windows Installer files +*.cab +*.msix +*.msm +*.msp + +# Windows shortcuts +*.lnk + +### Zsh ### +# Zsh compiled script + 
zrecompile backup +*.zwc +*.zwc.old + +# Zsh completion-optimization dumpfile +*zcompdump* + +# Zsh history +.zsh_history + +# Zsh sessions +.zsh_sessions + +# Zsh zcalc history +.zcalc_history + +# A popular plugin manager's files +._zinit +.zinit_lstupd + +# zdharma/zshelldoc tool's files +zsdoc/data + +# robbyrussell/oh-my-zsh/plugins/per-directory-history plugin's files +# (when set-up to store the history in the local directory) +.directory_history + +# MichaelAquilina/zsh-autoswitch-virtualenv plugin's files +# (for Zsh plugins using Python) + +# Zunit tests' output +/tests/_output/* +!/tests/_output/.gitkeep + +### VisualStudio ### +## Ignore Visual Studio temporary files, build results, and +## files generated by popular Visual Studio add-ons. +## +## Get latest from https://github.com/github/gitignore/blob/main/VisualStudio.gitignore + +# User-specific files +*.rsuser +*.suo +*.user +*.userosscache +*.sln.docstates + +# User-specific files (MonoDevelop/Xamarin Studio) +*.userprefs + +# Mono auto generated files +mono_crash.* + +# Build results +[Dd]ebug/ +[Dd]ebugPublic/ +[Rr]elease/ +[Rr]eleases/ +x64/ +x86/ +[Ww][Ii][Nn]32/ +[Aa][Rr][Mm]/ +[Aa][Rr][Mm]64/ +bld/ +[Bb]in/ +[Oo]bj/ +[Ll]og/ +[Ll]ogs/ + +# Visual Studio 2015/2017 cache/options directory +.vs/ +# Uncomment if you have tasks that create the project's static files in wwwroot +#wwwroot/ + +# Visual Studio 2017 auto generated files +Generated\ Files/ + +# MSTest test Results +[Tt]est[Rr]esult*/ +[Bb]uild[Ll]og.* + +# NUnit +*.VisualState.xml +TestResult.xml +nunit-*.xml + +# Build Results of an ATL Project +[Dd]ebugPS/ +[Rr]eleasePS/ +dlldata.c + +# Benchmark Results +BenchmarkDotNet.Artifacts/ + +# .NET Core +project.lock.json +project.fragment.lock.json +artifacts/ + +# ASP.NET Scaffolding +ScaffoldingReadMe.txt + +# StyleCop +StyleCopReport.xml + +# Files built by Visual Studio +*_i.c +*_p.c +*_h.h +*.meta +*.iobj +*.ipdb +*.pgc +*.pgd +*.rsp +*.sbr +*.tlb +*.tli +*.tlh +*.tmp_proj 
+*_wpftmp.csproj +*.tlog +*.vspscc +*.vssscc +.builds +*.pidb +*.svclog +*.scc + +# Chutzpah Test files +_Chutzpah* + +# Visual C++ cache files +ipch/ +*.aps +*.ncb +*.opendb +*.opensdf +*.sdf +*.cachefile +*.VC.db +*.VC.VC.opendb + +# Visual Studio profiler +*.psess +*.vsp +*.vspx +*.sap + +# Visual Studio Trace Files +*.e2e + +# TFS 2012 Local Workspace +$tf/ + +# Guidance Automation Toolkit +*.gpState + +# ReSharper is a .NET coding add-in +_ReSharper*/ +*.[Rr]e[Ss]harper +*.DotSettings.user + +# TeamCity is a build add-in +_TeamCity* + +# DotCover is a Code Coverage Tool +*.dotCover + +# AxoCover is a Code Coverage Tool +.axoCover/* +!.axoCover/settings.json + +# Coverlet is a free, cross platform Code Coverage Tool +coverage*.json +coverage*.xml +coverage*.info + +# Visual Studio code coverage results +*.coverage +*.coveragexml + +# NCrunch +_NCrunch_* +.*crunch*.local.xml +nCrunchTemp_* + +# MightyMoose +*.mm.* +AutoTest.Net/ + +# Web workbench (sass) +.sass-cache/ + +# Installshield output folder +[Ee]xpress/ + +# DocProject is a documentation generator add-in +DocProject/buildhelp/ +DocProject/Help/*.HxT +DocProject/Help/*.HxC +DocProject/Help/*.hhc +DocProject/Help/*.hhk +DocProject/Help/*.hhp +DocProject/Help/Html2 +DocProject/Help/html + +# Click-Once directory +publish/ + +# Publish Web Output +*.[Pp]ublish.xml +*.azurePubxml +# Note: Comment the next line if you want to checkin your web deploy settings, +# but database connection strings (with potential passwords) will be unencrypted +*.pubxml +*.publishproj + +# Microsoft Azure Web App publish settings. Comment the next line if you want to +# checkin your Azure Web App publish settings, but sensitive information contained +# in these scripts will be unencrypted +PublishScripts/ + +# NuGet Packages +*.nupkg +# NuGet Symbol Packages +*.snupkg +# The packages folder can be ignored because of Package Restore +**/[Pp]ackages/* +# except build/, which is used as an MSBuild target. 
+!**/[Pp]ackages/build/ +# Uncomment if necessary however generally it will be regenerated when needed +#!**/[Pp]ackages/repositories.config +# NuGet v3's project.json files produces more ignorable files +*.nuget.props +*.nuget.targets + +# Microsoft Azure Build Output +csx/ +*.build.csdef + +# Microsoft Azure Emulator +ecf/ +rcf/ + +# Windows Store app package directories and files +AppPackages/ +BundleArtifacts/ +Package.StoreAssociation.xml +_pkginfo.txt +*.appx +*.appxbundle +*.appxupload + +# Visual Studio cache files +# files ending in .cache can be ignored +*.[Cc]ache +# but keep track of directories ending in .cache +!?*.[Cc]ache/ + +# Others +ClientBin/ +~$* +*.dbmdl +*.dbproj.schemaview +*.jfm +*.pfx +*.publishsettings +orleans.codegen.cs + +# Including strong name files can present a security risk +# (https://github.com/github/gitignore/pull/2483#issue-259490424) +#*.snk + +# Since there are multiple workflows, uncomment next line to ignore bower_components +# (https://github.com/github/gitignore/pull/1529#issuecomment-104372622) +#bower_components/ + +# RIA/Silverlight projects +Generated_Code/ + +# Backup & report files from converting an old project file +# to a newer Visual Studio version. Backup files are not needed, +# because we have git ;-) +_UpgradeReport_Files/ +Backup*/ +UpgradeLog*.XML +UpgradeLog*.htm +ServiceFabricBackup/ +*.rptproj.bak + +# SQL Server files +*.mdf +*.ldf +*.ndf + +# Business Intelligence projects +*.rdl.data +*.bim.layout +*.bim_*.settings +*.rptproj.rsuser +*- [Bb]ackup.rdl +*- [Bb]ackup ([0-9]).rdl +*- [Bb]ackup ([0-9][0-9]).rdl + +# Microsoft Fakes +FakesAssemblies/ + +# GhostDoc plugin setting file +*.GhostDoc.xml + +# Node.js Tools for Visual Studio +.ntvs_analysis.dat +node_modules/ + +# Visual Studio 6 build log +*.plg + +# Visual Studio 6 workspace options file +*.opt + +# Visual Studio 6 auto-generated workspace file (contains which files were open etc.) 
+*.vbw + +# Visual Studio 6 auto-generated project file (contains which files were open etc.) +*.vbp + +# Visual Studio 6 workspace and project file (working project files containing files to include in project) +*.dsw +*.dsp + +# Visual Studio 6 technical files + +# Visual Studio LightSwitch build output +**/*.HTMLClient/GeneratedArtifacts +**/*.DesktopClient/GeneratedArtifacts +**/*.DesktopClient/ModelManifest.xml +**/*.Server/GeneratedArtifacts +**/*.Server/ModelManifest.xml +_Pvt_Extensions + +# Paket dependency manager +.paket/paket.exe +paket-files/ + +# FAKE - F# Make +.fake/ + +# CodeRush personal settings +.cr/personal + +# Python Tools for Visual Studio (PTVS) + +# Cake - Uncomment if you are using it +# tools/** +# !tools/packages.config + +# Tabs Studio +*.tss + +# Telerik's JustMock configuration file +*.jmconfig + +# BizTalk build output +*.btp.cs +*.btm.cs +*.odx.cs +*.xsd.cs + +# OpenCover UI analysis results +OpenCover/ + +# Azure Stream Analytics local run output +ASALocalRun/ + +# MSBuild Binary and Structured Log +*.binlog + +# NVidia Nsight GPU debugger configuration file +*.nvuser + +# MFractors (Xamarin productivity tool) working folder +.mfractor/ + +# Local History for Visual Studio +.localhistory/ + +# Visual Studio History (VSHistory) files +.vshistory/ + +# BeatPulse healthcheck temp database +healthchecksdb + +# Backup folder for Package Reference Convert tool in Visual Studio 2017 +MigrationBackup/ + +# Ionide (cross platform F# VS Code tools) working folder +.ionide/ + +# Fody - auto-generated XML schema +FodyWeavers.xsd + +# VS Code files for those working on multiple tools +*.code-workspace + +# Local History for Visual Studio Code + +# Windows Installer files from build outputs + +# JetBrains Rider +*.sln.iml + +### VisualStudio Patch ### +# Additional files built by Visual Studio + +# End of 
https://www.toptal.com/developers/gitignore/api/linux,archlinuxpackages,osx,windows,python,c,django,database,pycharm,visualstudio,visualstudiocode,vim,zsh,git,diff,microsoftoffice,spreadsheet,ssh,certificates
diff --git a/stac_model/.pre-commit-config.yaml b/stac_model/.pre-commit-config.yaml
new file mode 100644
index 0000000..a905288
--- /dev/null
+++ b/stac_model/.pre-commit-config.yaml
@@ -0,0 +1,13 @@
+default_language_version:
+  python: python3.10
+
+default_stages: [commit, push]
+
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v2.5.0
+    hooks:
+      - id: check-yaml
+      - id: end-of-file-fixer
+        exclude: LICENSE
+# UPDATEME with additional hooks
diff --git a/stac_model/AUTHORS.md b/stac_model/AUTHORS.md
new file mode 100644
index 0000000..7a4332f
--- /dev/null
+++ b/stac_model/AUTHORS.md
@@ -0,0 +1,9 @@
+# Credits
+
+## Main Developer
+
+- Ryan Avery
+
+## Contributors
+
+We don't have contributors... yet. Why not be the first?
diff --git a/stac_model/CHANGELOG.md b/stac_model/CHANGELOG.md
new file mode 100644
index 0000000..e69de29
diff --git a/stac_model/CONTRIBUTING.md b/stac_model/CONTRIBUTING.md
new file mode 100644
index 0000000..89eda26
--- /dev/null
+++ b/stac_model/CONTRIBUTING.md
@@ -0,0 +1,96 @@
+# How to contribute
+
+### Project setup
+
+1. If you don't have `Poetry` installed run:
+
+```bash
+make poetry-install
+```
+
+> This installs Poetry as a [standalone application][fs1]. If you prefer, you can simply install it inside your virtual environment.
+
+2. Initialize project dependencies with poetry and install `pre-commit` hooks:
+
+```bash
+make install
+make pre-commit-install
+```
+
+You're then ready to run and test your contributions.
+
+To activate your `virtualenv` run `poetry shell`.
+
+Want to know more about Poetry? Check [its documentation][fs2].
+
+Poetry's [commands][fs3] let you easily make descriptive python environments and run commands in those environments, like:
+
+- `poetry add numpy@latest`
+- `poetry run pytest`
+- `poetry publish --build`
+
+etc.
+
+3. Run the codestyle and other checks:
+
+```bash
+make codestyle
+```
+
+Many checks are configured for this project. Command `make codestyle` will run ruff autoformatting. `make lint` will just run linting. `make check-safety` will look at the security of your code.
+
+Command `make lint-all` applies all checks.
+
+
+4. Run `pytest` with
+
+```bash
+make test
+```
+
+
+5. Upload your changes to your fork, then make a PR from there to the main repo:
+
+```bash
+git checkout -b your-branch
+git add .
+git commit -m ":tada: Initial commit"
+git remote add origin https://github.com/your-fork/stac-model.git
+git push -u origin your-branch
+```
+
+### Building and releasing stac-model
+
+Building a new version of `stac-model` contains steps:
+
+- Bump the version with `poetry version <version>`. You can pass the new version explicitly, or a rule such as `major`, `minor`, or `patch`. For more details, refer to the [Semantic Versions][fs4] standard;
+- Make a commit to `GitHub`;
+- Create a `GitHub release`;
+- And... publish :slight_smile: `poetry publish --build`
+
+### Before submitting
+
+Before submitting your code please do the following steps:
+
+1. Add any changes you want
+1. Add tests for the new changes
+1. Edit documentation if you have changed something significant
+1. Run `make codestyle` to format your changes.
+1. Run `make lint-all` to ensure that types, security and docstrings are okay.
+
+## Other help
+
+You can contribute by spreading the word about this library.
+It would also be a huge contribution to write
+a short article on how you are using this project.
+You can also share how the ML Model extension does or does
+not serve your needs with us in the Github Discussions or raise
+Issues for bugs.
+ +[fs1]: https://github.com/python-poetry/install.python-poetry.org +[fs2]: https://python-poetry.org/docs/ +[fs3]: https://python-poetry.org/docs/cli/#commands +[fs4]: https://semver.org/ + +[li2]: http://www.pydocstyle.org/en/stable/ +[li3]: https://github.com/jsh9/pydoclint diff --git a/stac_model/LICENSE b/stac_model/LICENSE new file mode 100644 index 0000000..938525f --- /dev/null +++ b/stac_model/LICENSE @@ -0,0 +1,207 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, and + distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by the + copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all other + entities that control, are controlled by, or are under common control with + that entity. For the purposes of this definition, "control" means (i) the + power, direct or indirect, to cause the direction or management of such + entity, whether by contract or otherwise, or (ii) ownership of + fifty percent (50%) or more of the outstanding shares, or (iii) beneficial + ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity exercising + permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation source, + and configuration files. + + "Object" form shall mean any form resulting from mechanical transformation + or translation of a Source form, including but not limited to compiled + object code, generated documentation, and conversions to + other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or Object + form, made available under the License, as indicated by a copyright notice + that is included in or attached to the work (an example is provided in the + Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object form, + that is based on (or derived from) the Work and for which the editorial + revisions, annotations, elaborations, or other modifications represent, + as a whole, an original work of authorship. For the purposes of this + License, Derivative Works shall not include works that remain separable + from, or merely link (or bind by name) to the interfaces of, the Work and + Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including the original + version of the Work and any modifications or additions to that Work or + Derivative Works thereof, that is intentionally submitted to Licensor for + inclusion in the Work by the copyright owner or by an individual or + Legal Entity authorized to submit on behalf of the copyright owner. + For the purposes of this definition, "submitted" means any form of + electronic, verbal, or written communication sent to the Licensor or its + representatives, including but not limited to communication on electronic + mailing lists, source code control systems, and issue tracking systems + that are managed by, or on behalf of, the Licensor for the purpose of + discussing and improving the Work, but excluding communication that is + conspicuously marked or otherwise designated in writing by the copyright + owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity on + behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. 
+ + Subject to the terms and conditions of this License, each Contributor + hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, + royalty-free, irrevocable copyright license to reproduce, prepare + Derivative Works of, publicly display, publicly perform, sublicense, + and distribute the Work and such Derivative Works in + Source or Object form. + +3. Grant of Patent License. + + Subject to the terms and conditions of this License, each Contributor + hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, + royalty-free, irrevocable (except as stated in this section) patent + license to make, have made, use, offer to sell, sell, import, and + otherwise transfer the Work, where such license applies only to those + patent claims licensable by such Contributor that are necessarily + infringed by their Contribution(s) alone or by combination of their + Contribution(s) with the Work to which such Contribution(s) was submitted. + If You institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work or a + Contribution incorporated within the Work constitutes direct or + contributory patent infringement, then any patent licenses granted to + You under this License for that Work shall terminate as of the date such + litigation is filed. + +4. Redistribution. + + You may reproduce and distribute copies of the Work or Derivative Works + thereof in any medium, with or without modifications, and in Source or + Object form, provided that You meet the following conditions: + + 1. You must give any other recipients of the Work or Derivative Works a + copy of this License; and + + 2. You must cause any modified files to carry prominent notices stating + that You changed the files; and + + 3. 
You must retain, in the Source form of any Derivative Works that You + distribute, all copyright, patent, trademark, and attribution notices from + the Source form of the Work, excluding those notices that do not pertain + to any part of the Derivative Works; and + + 4. If the Work includes a "NOTICE" text file as part of its distribution, + then any Derivative Works that You distribute must include a readable copy + of the attribution notices contained within such NOTICE file, excluding + those notices that do not pertain to any part of the Derivative Works, + in at least one of the following places: within a NOTICE text file + distributed as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, within a + display generated by the Derivative Works, if and wherever such + third-party notices normally appear. The contents of the NOTICE file are + for informational purposes only and do not modify the License. + You may add Your own attribution notices within Derivative Works that You + distribute, alongside or as an addendum to the NOTICE text from the Work, + provided that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and may + provide additional or different license terms and conditions for use, + reproduction, or distribution of Your modifications, or for any such + Derivative Works as a whole, provided Your use, reproduction, and + distribution of the Work otherwise complies with the conditions + stated in this License. + +5. Submission of Contributions. + + Unless You explicitly state otherwise, any Contribution intentionally + submitted for inclusion in the Work by You to the Licensor shall be under + the terms and conditions of this License, without any additional + terms or conditions. 
Notwithstanding the above, nothing herein shall + supersede or modify the terms of any separate license agreement you may + have executed with Licensor regarding such Contributions. + +6. Trademarks. + + This License does not grant permission to use the trade names, trademarks, + service marks, or product names of the Licensor, except as required for + reasonable and customary use in describing the origin of the Work and + reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + + Unless required by applicable law or agreed to in writing, Licensor + provides the Work (and each Contributor provides its Contributions) + on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + either express or implied, including, without limitation, any warranties + or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS + FOR A PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any risks + associated with Your exercise of permissions under this License. + +8. Limitation of Liability. + + In no event and under no legal theory, whether in tort + (including negligence), contract, or otherwise, unless required by + applicable law (such as deliberate and grossly negligent acts) or agreed + to in writing, shall any Contributor be liable to You for damages, + including any direct, indirect, special, incidental, or consequential + damages of any character arising as a result of this License or out of + the use or inability to use the Work (including but not limited to damages + for loss of goodwill, work stoppage, computer failure or malfunction, + or any and all other commercial damages or losses), even if such + Contributor has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. 
+ + While redistributing the Work or Derivative Works thereof, You may choose + to offer, and charge a fee for, acceptance of support, warranty, + indemnity, or other liability obligations and/or rights consistent with + this License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf of any + other Contributor, and only if You agree to indemnify, defend, and hold + each Contributor harmless for any liability incurred by, or claims + asserted against, such Contributor by reason of your accepting any such + warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + + To apply the Apache License to your work, attach the following boilerplate + notice, with the fields enclosed by brackets "[]" replaced with your own + identifying information. (Don't include the brackets!) The text should be + enclosed in the appropriate comment syntax for the file format. We also + recommend that a file or class name and description of purpose be included + on the same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2023 Ryan Avery + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + or implied. See the License for the specific language governing + permissions and limitations under the License. 
diff --git a/stac_model/Makefile b/stac_model/Makefile
new file mode 100644
index 0000000..90b3c16
--- /dev/null
+++ b/stac_model/Makefile
@@ -0,0 +1,94 @@
+#* Variables
+SHELL := /usr/bin/env bash
+PYTHON := python
+PYTHONPATH := `pwd`
+
+#* Poetry
+.PHONY: poetry-install
+poetry-install:
+	curl -sSL https://install.python-poetry.org | $(PYTHON) -
+
+.PHONY: poetry-remove
+poetry-remove:
+	curl -sSL https://install.python-poetry.org | $(PYTHON) - --uninstall
+
+.PHONY: poetry-plugins
+poetry-plugins:
+	poetry self add poetry-plugin-up
+
+
+#* Installation
+.PHONY: install
+install:
+	poetry lock -n && poetry export --without-hashes > requirements.txt
+	poetry install -n
+	-poetry run mypy --install-types --non-interactive ./
+
+.PHONY: pre-commit-install
+pre-commit-install:
+	poetry run pre-commit install
+
+
+#* Formatters
+.PHONY: codestyle
+codestyle:
+	poetry run ruff format --config=pyproject.toml stac_model tests
+
+.PHONY: format
+format: codestyle
+
+#* Linting
+.PHONY: test
+test:
+	PYTHONPATH=$(PYTHONPATH) poetry run pytest -c pyproject.toml --cov-report=html --cov=stac_model tests/
+
+.PHONY: mypy
+mypy:
+	poetry run mypy --config-file pyproject.toml ./
+
+.PHONY: check-safety
+check-safety:
+	poetry check
+	poetry run safety check --full-report
+	poetry run bandit -ll --recursive stac_model tests
+
+.PHONY: lint
+lint:
+	poetry run ruff --config=pyproject.toml ./
+	poetry run pydocstyle --count --config=pyproject.toml ./
+	poetry run pydoclint --config=pyproject.toml ./
+
+.PHONY: lint-all
+lint-all: test lint mypy check-safety
+
+.PHONY: update-dev-deps
+update-dev-deps:
+	poetry up --only=dev-dependencies --latest
+
+#* Cleaning
+.PHONY: pycache-remove
+pycache-remove:
+	find . | grep -E "(__pycache__|\.pyc|\.pyo$$)" | xargs rm -rf
+
+.PHONY: dsstore-remove
+dsstore-remove:
+	find . | grep -E ".DS_Store" | xargs rm -rf
+
+.PHONY: mypycache-remove
+mypycache-remove:
+	find . 
| grep -E ".mypy_cache" | xargs rm -rf + +.PHONY: ipynbcheckpoints-remove +ipynbcheckpoints-remove: + find . | grep -E ".ipynb_checkpoints" | xargs rm -rf + +.PHONY: pytestcache-remove +pytestcache-remove: + find . | grep -E ".pytest_cache" | xargs rm -rf + +.PHONY: build-remove +build-remove: + rm -rf build/ + +.PHONY: cleanup +cleanup: pycache-remove dsstore-remove mypycache-remove ipynbcheckpoints-remove pytestcache-remove diff --git a/stac_model/README.md b/stac_model/README.md new file mode 100644 index 0000000..1f37fdd --- /dev/null +++ b/stac_model/README.md @@ -0,0 +1,161 @@ +# stac-model + +
+ +[![Python support][bp1]][bp2] +[![PyPI Release][bp3]][bp2] +[![Repository][bscm1]][bp4] +[![Releases][bscm2]][bp5] +[![Docs][bdoc1]][bdoc2] + +[![Contributions Welcome][bp8]][bp9] + +[![Poetry][bp11]][bp12] +[![Pre-commit][bp15]][bp16] +[![Semantic versions][blic3]][bp5] +[![Pipelines][bscm6]][bscm7] + +_A PydanticV2 validation and serialization library for the STAC ML Model Extension_ + +
+ +## Installation + +```bash +pip install -U stac-model +``` + +or install with `Poetry`: + +```bash +poetry add stac-model +``` +Then you can run + +```bash +stac-model --help +``` + +or with `Poetry`: + +```bash +poetry run stac-model --help +``` + +## Creating an example metadata json + +``` +poetry run stac-model +``` + +This will make an example example.json metadata file for an example model. + +Currently this looks like + +``` +{ + "signatures": { + "inputs": [ + { + "name": "input_tensor", + "dtype": "float32", + "shape": [ + -1, + 13, + 64, + 64 + ] + } + ], + "outputs": [ + { + "name": "output_tensor", + "dtype": "float32", + "shape": [ + -1, + 10 + ] + } + ], + "params": null + }, + "artifact": { + "path": "s3://example/s3/uri/model.pt", + "additional_files": null + }, + "id": "3fa03dceb4004b6e8a9e8591e4b3a99d", + "class_map": { + "class_to_label_id": { + "Annual Crop": 0, + "Forest": 1, + "Herbaceous Vegetation": 2, + "Highway": 3, + "Industrial Buildings": 4, + "Pasture": 5, + "Permanent Crop": 6, + "Residential Buildings": 7, + "River": 8, + "SeaLake": 9 + } + }, + "runtime_config": null, + "name": "eurosat", + "ml_model_type": null, + "ml_model_processor_type": "cpu", + "ml_model_learning_approach": null, + "ml_model_prediction_type": null, + "ml_model_architecture": null +} +``` + +## :chart_with_upwards_trend: Releases + +You can see the list of available releases on the [GitHub Releases][r1] page. + + +[![License][blic1]][blic2] + +This project is licenced under the terms of the `Apache Software License 2.0` licence. See [LICENCE][blic2] for more details. + + +## Credits [![Python project templated from galactipy.][bp6]][bp7] + +This project was generated with [`galactipy`][bp7]. 
+
+
+
+[bp1]: https://img.shields.io/pypi/pyversions/stac-model?style=for-the-badge
+[bp2]: https://pypi.org/project/stac-model/
+[bp3]: https://img.shields.io/pypi/v/stac-model?style=for-the-badge&logo=pypi&color=3775a9
+[bp4]: https://github.com/stac-extensions/stac-model
+[bp5]: https://github.com/stac-extensions/stac-model/releases
+[bp6]: https://img.shields.io/badge/made%20with-galactipy%20%F0%9F%8C%8C-179287?style=for-the-badge&labelColor=193A3E
+[bp7]: https://kutt.it/7fYqQl
+[bp8]: https://img.shields.io/static/v1.svg?label=Contributions&message=Welcome&color=0059b3&style=for-the-badge
+[bp9]: https://github.com/stac-extensions/stac-model/blob/main/CONTRIBUTING.md
+[bp11]: https://img.shields.io/endpoint?url=https://python-poetry.org/badge/v0.json&style=for-the-badge
+[bp12]: https://python-poetry.org/
+
+[bp15]: https://img.shields.io/badge/pre--commit-enabled-brightgreen?logo=pre-commit&logoColor=white&style=for-the-badge
+[bp16]: https://github.com/stac-extensions/stac-model/blob/main/.pre-commit-config.yaml
+
+[blic1]: https://img.shields.io/github/license/stac-extensions/stac-model?style=for-the-badge
+[blic2]: https://github.com/stac-extensions/stac-model/blob/main/LICENSE
+[blic3]: https://img.shields.io/badge/%F0%9F%93%A6-semantic%20versions-4053D6?style=for-the-badge
+
+[r1]: https://github.com/stac-extensions/stac-model/releases
+
+[bscm1]: https://img.shields.io/badge/GitHub-100000?style=for-the-badge&logo=github&logoColor=white
+[bscm2]: https://img.shields.io/github/v/release/stac-extensions/stac-model?style=for-the-badge&logo=semantic-release&color=347d39
+[bscm6]: https://img.shields.io/github/actions/workflow/status/stac-extensions/stac-model/build.yml?style=for-the-badge&logo=github
+[bscm7]: https://github.com/stac-extensions/stac-model/actions/workflows/build.yml
+
+[hub1]: https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuring-dependabot-version-updates#enabling-dependabot-version-updates
+[hub2]: 
https://github.com/marketplace/actions/close-stale-issues +[hub5]: https://github.com/stac-extensions/stac-model/blob/main/.github/workflows/build.yml +[hub6]: https://docs.github.com/en/code-security/dependabot +[hub8]: https://github.com/stac-extensions/stac-model/blob/main/.github/release-drafter.yml +[hub9]: https://github.com/stac-extensions/stac-model/blob/main/.github/.stale.yml + +[bdoc1]: https://img.shields.io/badge/docs-github%20pages-0a507a?style=for-the-badge +[bdoc2]: https://stac-extensions.github.io/stac-model diff --git a/stac_model/SECURITY.md b/stac_model/SECURITY.md new file mode 100644 index 0000000..9ca2669 --- /dev/null +++ b/stac_model/SECURITY.md @@ -0,0 +1,29 @@ +# Security + +## :closed_lock_with_key: Reporting Security Issues + +> Do not open issues that might have security implications! +> It is critical that security related issues are reported privately so we have time to address them before they become public knowledge. + +Vulnerabilities can be reported by emailing core members: + +- Ryan Avery <[ryan@wherobots.com][1]> + +Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: + +- Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) +- Full paths of source file(s) related to the manifestation of the issue +- The location of the affected source code (tag/branch/commit or direct URL) +- Any special configuration required to reproduce the issue +- Environment (e.g. Linux / Windows / macOS) +- Step-by-step instructions to reproduce the issue +- Proof-of-concept or exploit code (if possible) +- Impact of the issue, including how an attacker might exploit the issue + +This information will help us triage your report more quickly. + +## Preferred Languages + +We prefer all communications to be in English. 
+
+[1]: mailto:ryan@wherobots.com
diff --git a/stac_model/docker/Dockerfile b/stac_model/docker/Dockerfile
new file mode 100644
index 0000000..8c81a07
--- /dev/null
+++ b/stac_model/docker/Dockerfile
@@ -0,0 +1,25 @@
+FROM python:3.10-slim-buster
+
+ENV LANG=C.UTF-8 \
+    LC_ALL=C.UTF-8 \
+    PATH="${PATH}:/root/.poetry/bin"
+
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends \
+    curl \
+    && rm -rf /var/lib/apt/lists/*
+
+COPY pyproject.toml ./
+
+# Install Poetry
+RUN curl -sSL https://install.python-poetry.org | POETRY_HOME=/opt/poetry python && \
+    cd /usr/local/bin && \
+    ln -s /opt/poetry/bin/poetry && \
+    poetry config virtualenvs.create false
+
+# Allow installing dev dependencies to run tests
+ARG INSTALL_DEV=false
+RUN bash -c "if [ $INSTALL_DEV == 'true' ] ; then poetry install --no-root ; else poetry install --no-root --no-dev ; fi"
+
+RUN mkdir -p /workspace
+WORKDIR /workspace
diff --git a/stac_model/docker/README.md b/stac_model/docker/README.md
new file mode 100644
index 0000000..614d5b4
--- /dev/null
+++ b/stac_model/docker/README.md
@@ -0,0 +1,47 @@
+# Docker for stac-model
+
+## Installation
+
+To build the Docker image, run:
+
+```bash
+make docker-build
+```
+
+which is equivalent to:
+
+```bash
+make docker-build VERSION=latest
+```
+
+You may provide name and version for the image.
+Default name is `IMAGE := stac_model`.
+Default version is `VERSION := latest`.
+ +```bash +make docker-build IMAGE=some_name VERSION=0.1.0 +``` + +## Usage + +```bash +docker run -it --rm \ + -v $(pwd):/workspace \ + stac_model bash +``` + +## How to clean up + +To uninstall docker image run `make docker-remove` with `VERSION`: + +```bash +make docker-remove VERSION=0.1.0 +``` + +you may also choose the image name + +```bash +make docker-remove IMAGE=some_name VERSION=latest +``` + +If you want to clean all, including `build` and `pycache` run `make cleanup` diff --git a/stac_model/docs/.gitkeep b/stac_model/docs/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/stac_model/example.json b/stac_model/example.json new file mode 100644 index 0000000..df5bc45 --- /dev/null +++ b/stac_model/example.json @@ -0,0 +1,53 @@ +{ + "signatures": { + "inputs": [ + { + "name": "input_tensor", + "dtype": "float32", + "shape": [ + -1, + 13, + 64, + 64 + ] + } + ], + "outputs": [ + { + "name": "output_tensor", + "dtype": "float32", + "shape": [ + -1, + 10 + ] + } + ], + "params": null + }, + "artifact": { + "path": "s3://example/s3/uri/model.pt", + "additional_files": null + }, + "id": "3fa03dceb4004b6e8a9e8591e4b3a99d", + "class_map": { + "class_to_label_id": { + "Annual Crop": 0, + "Forest": 1, + "Herbaceous Vegetation": 2, + "Highway": 3, + "Industrial Buildings": 4, + "Pasture": 5, + "Permanent Crop": 6, + "Residential Buildings": 7, + "River": 8, + "SeaLake": 9 + } + }, + "runtime_config": null, + "name": "eurosat", + "ml_model_type": null, + "ml_model_processor_type": "cpu", + "ml_model_learning_approach": null, + "ml_model_prediction_type": null, + "ml_model_architecture": null +} diff --git a/stac_model/model_metadata.py b/stac_model/model_metadata.py new file mode 100644 index 0000000..447f441 --- /dev/null +++ b/stac_model/model_metadata.py @@ -0,0 +1,102 @@ +from pydantic import BaseModel, Field, FilePath, AnyUrl +from typing import Optional, List, Tuple, Dict, Literal, Any +from uuid import uuid4 +import numpy as np +import re + +# 
Pydantic Models +class TensorSignature(BaseModel): + name: Optional[str] = None + dtype: Any = Field(...) + shape: Tuple[int, ...] | List[int] = Field(...) + +class ModelSignature(BaseModel): + inputs: List[TensorSignature] + outputs: List[TensorSignature] + params: Optional[Dict[str, int | float | str]] = None + + class Config: + arbitrary_types_allowed = True + +class RuntimeConfig(BaseModel): + environment: str + +class S3Path(AnyUrl): + allowed_schemes = {'s3'} + user_required = False + max_length = 1023 + min_length = 8 + + @classmethod + def validate_s3_url(cls, v): + if not v.startswith('s3://'): + raise ValueError('S3 path must start with s3://') + return v + + @classmethod + def validate_bucket_name(cls, v): + if not v: + raise ValueError('Bucket name cannot be empty') + return v + + @classmethod + def validate_key(cls, v): + if '//' in v: + raise ValueError('Key must not contain double slashes') + return v.strip('/') + +class ModelArtifact(BaseModel): + path: S3Path | FilePath | str = Field(...) 
+ additional_files: Optional[Dict[str, FilePath]] = None + + class Config: + arbitrary_types_allowed = True + +class ClassMap(BaseModel): + class_to_label_id: Dict[str, int] + + @property + def label_id_to_class(self) -> Dict[int, str]: + return {v: k for k, v in self.class_to_label_id.items()} + +class ModelMetadata(BaseModel): + signatures: ModelSignature + artifact: ModelArtifact + id: str = Field(default_factory=lambda: uuid4().hex) + class_map: ClassMap + runtime_config: Optional[RuntimeConfig] = None + name: str + ml_model_type: Optional[str] = None + ml_model_processor_type: Optional[Literal["cpu", "gpu", "tpu", "mps"]] = None + ml_model_learning_approach: Optional[str] = None + ml_model_prediction_type: Optional[Literal["object-detection", "classification", "segmentation", "regression"]] = None + ml_model_architecture: Optional[str] = None + + class Config: + arbitrary_types_allowed = True + +# Functions to create, serialize, and deserialize ModelMetadata +def create_metadata(): + input_sig = TensorSignature(name='input_tensor', dtype='float32', shape=(-1, 13, 64, 64)) + output_sig = TensorSignature(name='output_tensor', dtype='float32', shape=(-1, 10)) + model_sig = ModelSignature(inputs=[input_sig], outputs=[output_sig]) + model_artifact = ModelArtifact(path="s3://example/s3/uri/model.pt") + class_map = ClassMap(class_to_label_id={ + 'Annual Crop': 0, 'Forest': 1, 'Herbaceous Vegetation': 2, 'Highway': 3, + 'Industrial Buildings': 4, 'Pasture': 5, 'Permanent Crop': 6, + 'Residential Buildings': 7, 'River': 8, 'SeaLake': 9 + }) + return ModelMetadata(name="eurosat", class_map=class_map, signatures=model_sig, artifact=model_artifact, ml_model_processor_type="cpu") + +def metadata_json(metadata: ModelMetadata) -> str: + return metadata.model_dump_json(indent=2) + +def model_metadata_json_operations(json_str: str) -> ModelMetadata: + return ModelMetadata.model_validate_json(json_str) + +# Running the functions end-to-end +metadata = create_metadata() 
+json_str = metadata_json(metadata) +model_metadata = model_metadata_json_operations(json_str) + +print("Model Metadata Name:", model_metadata.name) diff --git a/stac_model/poetry.lock b/stac_model/poetry.lock new file mode 100644 index 0000000..312b915 --- /dev/null +++ b/stac_model/poetry.lock @@ -0,0 +1,1355 @@ +# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. + +[[package]] +name = "annotated-types" +version = "0.6.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +files = [ + {file = "annotated_types-0.6.0-py3-none-any.whl", hash = "sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43"}, + {file = "annotated_types-0.6.0.tar.gz", hash = "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d"}, +] + +[[package]] +name = "bandit" +version = "1.7.6" +description = "Security oriented static analyser for python code." +optional = false +python-versions = ">=3.8" +files = [ + {file = "bandit-1.7.6-py3-none-any.whl", hash = "sha256:36da17c67fc87579a5d20c323c8d0b1643a890a2b93f00b3d1229966624694ff"}, + {file = "bandit-1.7.6.tar.gz", hash = "sha256:72ce7bc9741374d96fb2f1c9a8960829885f1243ffde743de70a19cee353e8f3"}, +] + +[package.dependencies] +colorama = {version = ">=0.3.9", markers = "platform_system == \"Windows\""} +GitPython = ">=3.1.30" +PyYAML = ">=5.3.1" +rich = "*" +stevedore = ">=1.20.0" + +[package.extras] +test = ["beautifulsoup4 (>=4.8.0)", "coverage (>=4.5.4)", "fixtures (>=3.0.0)", "flake8 (>=4.0.0)", "pylint (==1.9.4)", "stestr (>=2.5.0)", "testscenarios (>=0.5.0)", "testtools (>=2.3.0)", "tomli (>=1.1.0)"] +toml = ["tomli (>=1.1.0)"] +yaml = ["PyYAML"] + +[[package]] +name = "certifi" +version = "2023.11.17" +description = "Python package for providing Mozilla's CA Bundle." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "certifi-2023.11.17-py3-none-any.whl", hash = "sha256:e036ab49d5b79556f99cfc2d9320b34cfbe5be05c5871b51de9329f0603b0474"}, + {file = "certifi-2023.11.17.tar.gz", hash = "sha256:9b469f3a900bf28dc19b8cfbf8019bf47f7fdd1a65a1d4ffb98fc14166beb4d1"}, +] + +[[package]] +name = "cfgv" +version = "3.4.0" +description = "Validate configuration and produce human readable error messages." +optional = false +python-versions = ">=3.8" +files = [ + {file = "cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9"}, + {file = "cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560"}, +] + +[[package]] +name = "charset-normalizer" +version = "3.3.2" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, + {file = 
"charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, + {file = 
"charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = 
"sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, + {file = 
"charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, + 
{file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, + {file = 
"charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, + {file = 
"charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, + {file = 
"charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, +] + +[[package]] +name = "click" +version = "8.1.7" +description = "Composable command line interface toolkit" +optional = false +python-versions = ">=3.7" +files = [ + {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, + {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "commonmark" +version = "0.9.1" +description = "Python parser for the CommonMark Markdown spec" +optional = false +python-versions = "*" +files = [ + {file = "commonmark-0.9.1-py2.py3-none-any.whl", hash = "sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9"}, + {file = "commonmark-0.9.1.tar.gz", hash = "sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60"}, +] + +[package.extras] +test = ["flake8 (==3.7.8)", "hypothesis (==3.55.3)"] + +[[package]] +name = "coverage" +version = "7.3.2" +description = "Code coverage measurement for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "coverage-7.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d872145f3a3231a5f20fd48500274d7df222e291d90baa2026cc5152b7ce86bf"}, + {file = "coverage-7.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:310b3bb9c91ea66d59c53fa4989f57d2436e08f18fb2f421a1b0b6b8cc7fffda"}, + {file = "coverage-7.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f47d39359e2c3779c5331fc740cf4bce6d9d680a7b4b4ead97056a0ae07cb49a"}, + {file = "coverage-7.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aa72dbaf2c2068404b9870d93436e6d23addd8bbe9295f49cbca83f6e278179c"}, + {file = "coverage-7.3.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:beaa5c1b4777f03fc63dfd2a6bd820f73f036bfb10e925fce067b00a340d0f3f"}, + {file = "coverage-7.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:dbc1b46b92186cc8074fee9d9fbb97a9dd06c6cbbef391c2f59d80eabdf0faa6"}, + {file = "coverage-7.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:315a989e861031334d7bee1f9113c8770472db2ac484e5b8c3173428360a9148"}, + {file = "coverage-7.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d1bc430677773397f64a5c88cb522ea43175ff16f8bfcc89d467d974cb2274f9"}, + {file = "coverage-7.3.2-cp310-cp310-win32.whl", hash = "sha256:a889ae02f43aa45032afe364c8ae84ad3c54828c2faa44f3bfcafecb5c96b02f"}, + {file = "coverage-7.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:c0ba320de3fb8c6ec16e0be17ee1d3d69adcda99406c43c0409cb5c41788a611"}, + {file = "coverage-7.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ac8c802fa29843a72d32ec56d0ca792ad15a302b28ca6203389afe21f8fa062c"}, + {file = "coverage-7.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:89a937174104339e3a3ffcf9f446c00e3a806c28b1841c63edb2b369310fd074"}, + {file = "coverage-7.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e267e9e2b574a176ddb983399dec325a80dbe161f1a32715c780b5d14b5f583a"}, + {file = "coverage-7.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:2443cbda35df0d35dcfb9bf8f3c02c57c1d6111169e3c85fc1fcc05e0c9f39a3"}, + {file = "coverage-7.3.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4175e10cc8dda0265653e8714b3174430b07c1dca8957f4966cbd6c2b1b8065a"}, + {file = "coverage-7.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0cbf38419fb1a347aaf63481c00f0bdc86889d9fbf3f25109cf96c26b403fda1"}, + {file = "coverage-7.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:5c913b556a116b8d5f6ef834038ba983834d887d82187c8f73dec21049abd65c"}, + {file = "coverage-7.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1981f785239e4e39e6444c63a98da3a1db8e971cb9ceb50a945ba6296b43f312"}, + {file = "coverage-7.3.2-cp311-cp311-win32.whl", hash = "sha256:43668cabd5ca8258f5954f27a3aaf78757e6acf13c17604d89648ecc0cc66640"}, + {file = "coverage-7.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10c39c0452bf6e694511c901426d6b5ac005acc0f78ff265dbe36bf81f808a2"}, + {file = "coverage-7.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:4cbae1051ab791debecc4a5dcc4a1ff45fc27b91b9aee165c8a27514dd160836"}, + {file = "coverage-7.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:12d15ab5833a997716d76f2ac1e4b4d536814fc213c85ca72756c19e5a6b3d63"}, + {file = "coverage-7.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c7bba973ebee5e56fe9251300c00f1579652587a9f4a5ed8404b15a0471f216"}, + {file = "coverage-7.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fe494faa90ce6381770746077243231e0b83ff3f17069d748f645617cefe19d4"}, + {file = "coverage-7.3.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6e9589bd04d0461a417562649522575d8752904d35c12907d8c9dfeba588faf"}, + {file = "coverage-7.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = 
"sha256:d51ac2a26f71da1b57f2dc81d0e108b6ab177e7d30e774db90675467c847bbdf"}, + {file = "coverage-7.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:99b89d9f76070237975b315b3d5f4d6956ae354a4c92ac2388a5695516e47c84"}, + {file = "coverage-7.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:fa28e909776dc69efb6ed975a63691bc8172b64ff357e663a1bb06ff3c9b589a"}, + {file = "coverage-7.3.2-cp312-cp312-win32.whl", hash = "sha256:289fe43bf45a575e3ab10b26d7b6f2ddb9ee2dba447499f5401cfb5ecb8196bb"}, + {file = "coverage-7.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:7dbc3ed60e8659bc59b6b304b43ff9c3ed858da2839c78b804973f613d3e92ed"}, + {file = "coverage-7.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f94b734214ea6a36fe16e96a70d941af80ff3bfd716c141300d95ebc85339738"}, + {file = "coverage-7.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:af3d828d2c1cbae52d34bdbb22fcd94d1ce715d95f1a012354a75e5913f1bda2"}, + {file = "coverage-7.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:630b13e3036e13c7adc480ca42fa7afc2a5d938081d28e20903cf7fd687872e2"}, + {file = "coverage-7.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9eacf273e885b02a0273bb3a2170f30e2d53a6d53b72dbe02d6701b5296101c"}, + {file = "coverage-7.3.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8f17966e861ff97305e0801134e69db33b143bbfb36436efb9cfff6ec7b2fd9"}, + {file = "coverage-7.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b4275802d16882cf9c8b3d057a0839acb07ee9379fa2749eca54efbce1535b82"}, + {file = "coverage-7.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:72c0cfa5250f483181e677ebc97133ea1ab3eb68645e494775deb6a7f6f83901"}, + {file = "coverage-7.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:cb536f0dcd14149425996821a168f6e269d7dcd2c273a8bff8201e79f5104e76"}, + {file = "coverage-7.3.2-cp38-cp38-win32.whl", hash = 
"sha256:307adb8bd3abe389a471e649038a71b4eb13bfd6b7dd9a129fa856f5c695cf92"}, + {file = "coverage-7.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:88ed2c30a49ea81ea3b7f172e0269c182a44c236eb394718f976239892c0a27a"}, + {file = "coverage-7.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b631c92dfe601adf8f5ebc7fc13ced6bb6e9609b19d9a8cd59fa47c4186ad1ce"}, + {file = "coverage-7.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d3d9df4051c4a7d13036524b66ecf7a7537d14c18a384043f30a303b146164e9"}, + {file = "coverage-7.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f7363d3b6a1119ef05015959ca24a9afc0ea8a02c687fe7e2d557705375c01f"}, + {file = "coverage-7.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2f11cc3c967a09d3695d2a6f03fb3e6236622b93be7a4b5dc09166a861be6d25"}, + {file = "coverage-7.3.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:149de1d2401ae4655c436a3dced6dd153f4c3309f599c3d4bd97ab172eaf02d9"}, + {file = "coverage-7.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:3a4006916aa6fee7cd38db3bfc95aa9c54ebb4ffbfc47c677c8bba949ceba0a6"}, + {file = "coverage-7.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9028a3871280110d6e1aa2df1afd5ef003bab5fb1ef421d6dc748ae1c8ef2ebc"}, + {file = "coverage-7.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9f805d62aec8eb92bab5b61c0f07329275b6f41c97d80e847b03eb894f38d083"}, + {file = "coverage-7.3.2-cp39-cp39-win32.whl", hash = "sha256:d1c88ec1a7ff4ebca0219f5b1ef863451d828cccf889c173e1253aa84b1e07ce"}, + {file = "coverage-7.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b4767da59464bb593c07afceaddea61b154136300881844768037fd5e859353f"}, + {file = "coverage-7.3.2-pp38.pp39.pp310-none-any.whl", hash = "sha256:ae97af89f0fbf373400970c0a21eef5aa941ffeed90aee43650b81f7d7f47637"}, + {file = "coverage-7.3.2.tar.gz", hash = 
"sha256:be32ad29341b0170e795ca590e1c07e81fc061cb5b10c74ce7203491484404ef"}, +] + +[package.dependencies] +tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} + +[package.extras] +toml = ["tomli"] + +[[package]] +name = "distlib" +version = "0.3.7" +description = "Distribution utilities" +optional = false +python-versions = "*" +files = [ + {file = "distlib-0.3.7-py2.py3-none-any.whl", hash = "sha256:2e24928bc811348f0feb63014e97aaae3037f2cf48712d51ae61df7fd6075057"}, + {file = "distlib-0.3.7.tar.gz", hash = "sha256:9dafe54b34a028eafd95039d5e5d4851a13734540f1331060d31c9916e7147a8"}, +] + +[[package]] +name = "docstring-parser-fork" +version = "0.0.5" +description = "Parse Python docstrings in reST, Google and Numpydoc format" +optional = false +python-versions = ">=3.6,<4.0" +files = [ + {file = "docstring_parser_fork-0.0.5-py3-none-any.whl", hash = "sha256:d521dea9b9cc6c60ab5569fa0c1115e3b84a83e6413266fb111a7c81cb935997"}, + {file = "docstring_parser_fork-0.0.5.tar.gz", hash = "sha256:395ae8ee6a359e268670ebc4fe9a40dab917a94f6decd7cda8e86f9bea5c9456"}, +] + +[[package]] +name = "dparse" +version = "0.6.3" +description = "A parser for Python dependency files" +optional = false +python-versions = ">=3.6" +files = [ + {file = "dparse-0.6.3-py3-none-any.whl", hash = "sha256:0d8fe18714056ca632d98b24fbfc4e9791d4e47065285ab486182288813a5318"}, + {file = "dparse-0.6.3.tar.gz", hash = "sha256:27bb8b4bcaefec3997697ba3f6e06b2447200ba273c0b085c3d012a04571b528"}, +] + +[package.dependencies] +packaging = "*" +tomli = {version = "*", markers = "python_version < \"3.11\""} + +[package.extras] +conda = ["pyyaml"] +pipenv = ["pipenv (<=2022.12.19)"] + +[[package]] +name = "exceptiongroup" +version = "1.2.0" +description = "Backport of PEP 654 (exception groups)" +optional = false +python-versions = ">=3.7" +files = [ + {file = "exceptiongroup-1.2.0-py3-none-any.whl", hash = 
"sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14"}, + {file = "exceptiongroup-1.2.0.tar.gz", hash = "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68"}, +] + +[package.extras] +test = ["pytest (>=6)"] + +[[package]] +name = "filelock" +version = "3.13.1" +description = "A platform independent file lock." +optional = false +python-versions = ">=3.8" +files = [ + {file = "filelock-3.13.1-py3-none-any.whl", hash = "sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c"}, + {file = "filelock-3.13.1.tar.gz", hash = "sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e"}, +] + +[package.extras] +docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.24)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)"] +typing = ["typing-extensions (>=4.8)"] + +[[package]] +name = "gitdb" +version = "4.0.11" +description = "Git Object Database" +optional = false +python-versions = ">=3.7" +files = [ + {file = "gitdb-4.0.11-py3-none-any.whl", hash = "sha256:81a3407ddd2ee8df444cbacea00e2d038e40150acfa3001696fe0dcf1d3adfa4"}, + {file = "gitdb-4.0.11.tar.gz", hash = "sha256:bf5421126136d6d0af55bc1e7c1af1c397a34f5b7bd79e776cd3e89785c2b04b"}, +] + +[package.dependencies] +smmap = ">=3.0.1,<6" + +[[package]] +name = "gitpython" +version = "3.1.40" +description = "GitPython is a Python library used to interact with Git repositories" +optional = false +python-versions = ">=3.7" +files = [ + {file = "GitPython-3.1.40-py3-none-any.whl", hash = "sha256:cf14627d5a8049ffbf49915732e5eddbe8134c3bdb9d476e6182b676fc573f8a"}, + {file = "GitPython-3.1.40.tar.gz", hash = "sha256:22b126e9ffb671fdd0c129796343a02bf67bf2994b35449ffc9321aa755e18a4"}, +] + +[package.dependencies] +gitdb = ">=4.0.1,<5" + +[package.extras] +test = ["black", "coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", 
"mock", "mypy", "pre-commit", "pytest", "pytest-cov", "pytest-instafail", "pytest-subtests", "pytest-sugar"] + +[[package]] +name = "identify" +version = "2.5.33" +description = "File identification library for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "identify-2.5.33-py2.py3-none-any.whl", hash = "sha256:d40ce5fcd762817627670da8a7d8d8e65f24342d14539c59488dc603bf662e34"}, + {file = "identify-2.5.33.tar.gz", hash = "sha256:161558f9fe4559e1557e1bff323e8631f6a0e4837f7497767c1782832f16b62d"}, +] + +[package.extras] +license = ["ukkonen"] + +[[package]] +name = "idna" +version = "3.6" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.5" +files = [ + {file = "idna-3.6-py3-none-any.whl", hash = "sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f"}, + {file = "idna-3.6.tar.gz", hash = "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca"}, +] + +[[package]] +name = "iniconfig" +version = "2.0.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.7" +files = [ + {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, +] + +[[package]] +name = "mypy" +version = "1.0.1" +description = "Optional static typing for Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mypy-1.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:71a808334d3f41ef011faa5a5cd8153606df5fc0b56de5b2e89566c8093a0c9a"}, + {file = "mypy-1.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:920169f0184215eef19294fa86ea49ffd4635dedfdea2b57e45cb4ee85d5ccaf"}, + {file = "mypy-1.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:27a0f74a298769d9fdc8498fcb4f2beb86f0564bcdb1a37b58cbbe78e55cf8c0"}, + {file = "mypy-1.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:65b122a993d9c81ea0bfde7689b3365318a88bde952e4dfa1b3a8b4ac05d168b"}, + {file = "mypy-1.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:5deb252fd42a77add936b463033a59b8e48eb2eaec2976d76b6878d031933fe4"}, + {file = "mypy-1.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2013226d17f20468f34feddd6aae4635a55f79626549099354ce641bc7d40262"}, + {file = "mypy-1.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:48525aec92b47baed9b3380371ab8ab6e63a5aab317347dfe9e55e02aaad22e8"}, + {file = "mypy-1.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c96b8a0c019fe29040d520d9257d8c8f122a7343a8307bf8d6d4a43f5c5bfcc8"}, + {file = "mypy-1.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:448de661536d270ce04f2d7dddaa49b2fdba6e3bd8a83212164d4174ff43aa65"}, + {file = "mypy-1.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:d42a98e76070a365a1d1c220fcac8aa4ada12ae0db679cb4d910fabefc88b994"}, + {file = "mypy-1.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e64f48c6176e243ad015e995de05af7f22bbe370dbb5b32bd6988438ec873919"}, + {file = "mypy-1.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fdd63e4f50e3538617887e9aee91855368d9fc1dea30da743837b0df7373bc4"}, + {file = "mypy-1.0.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:dbeb24514c4acbc78d205f85dd0e800f34062efcc1f4a4857c57e4b4b8712bff"}, + {file = "mypy-1.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a2948c40a7dd46c1c33765718936669dc1f628f134013b02ff5ac6c7ef6942bf"}, + {file = "mypy-1.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5bc8d6bd3b274dd3846597855d96d38d947aedba18776aa998a8d46fabdaed76"}, + {file = "mypy-1.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:17455cda53eeee0a4adb6371a21dd3dbf465897de82843751cf822605d152c8c"}, + {file = 
"mypy-1.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e831662208055b006eef68392a768ff83596035ffd6d846786578ba1714ba8f6"}, + {file = "mypy-1.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e60d0b09f62ae97a94605c3f73fd952395286cf3e3b9e7b97f60b01ddfbbda88"}, + {file = "mypy-1.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:0af4f0e20706aadf4e6f8f8dc5ab739089146b83fd53cb4a7e0e850ef3de0bb6"}, + {file = "mypy-1.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:24189f23dc66f83b839bd1cce2dfc356020dfc9a8bae03978477b15be61b062e"}, + {file = "mypy-1.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:93a85495fb13dc484251b4c1fd7a5ac370cd0d812bbfc3b39c1bafefe95275d5"}, + {file = "mypy-1.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f546ac34093c6ce33f6278f7c88f0f147a4849386d3bf3ae193702f4fe31407"}, + {file = "mypy-1.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c6c2ccb7af7154673c591189c3687b013122c5a891bb5651eca3db8e6c6c55bd"}, + {file = "mypy-1.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:15b5a824b58c7c822c51bc66308e759243c32631896743f030daf449fe3677f3"}, + {file = "mypy-1.0.1-py3-none-any.whl", hash = "sha256:eda5c8b9949ed411ff752b9a01adda31afe7eae1e53e946dbdf9db23865e66c4"}, + {file = "mypy-1.0.1.tar.gz", hash = "sha256:28cea5a6392bb43d266782983b5a4216c25544cd7d80be681a155ddcdafd152d"}, +] + +[package.dependencies] +mypy-extensions = ">=0.4.3" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typing-extensions = ">=3.10" + +[package.extras] +dmypy = ["psutil (>=4.0)"] +install-types = ["pip"] +python2 = ["typed-ast (>=1.4.0,<2)"] +reports = ["lxml"] + +[[package]] +name = "mypy-extensions" +version = "0.4.4" +description = "Experimental type system extensions for programs checked with the mypy typechecker." 
+optional = false +python-versions = ">=2.7" +files = [ + {file = "mypy_extensions-0.4.4.tar.gz", hash = "sha256:c8b707883a96efe9b4bb3aaf0dcc07e7e217d7d8368eec4db4049ee9e142f4fd"}, +] + +[[package]] +name = "nodeenv" +version = "1.8.0" +description = "Node.js virtual environment builder" +optional = false +python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*" +files = [ + {file = "nodeenv-1.8.0-py2.py3-none-any.whl", hash = "sha256:df865724bb3c3adc86b3876fa209771517b0cfe596beff01a92700e0e8be4cec"}, + {file = "nodeenv-1.8.0.tar.gz", hash = "sha256:d51e0c37e64fbf47d017feac3145cdbb58836d7eee8c6f6d3b6880c5456227d2"}, +] + +[package.dependencies] +setuptools = "*" + +[[package]] +name = "numpy" +version = "1.26.2" +description = "Fundamental package for array computing in Python" +optional = false +python-versions = ">=3.9" +files = [ + {file = "numpy-1.26.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3703fc9258a4a122d17043e57b35e5ef1c5a5837c3db8be396c82e04c1cf9b0f"}, + {file = "numpy-1.26.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cc392fdcbd21d4be6ae1bb4475a03ce3b025cd49a9be5345d76d7585aea69440"}, + {file = "numpy-1.26.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:36340109af8da8805d8851ef1d74761b3b88e81a9bd80b290bbfed61bd2b4f75"}, + {file = "numpy-1.26.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bcc008217145b3d77abd3e4d5ef586e3bdfba8fe17940769f8aa09b99e856c00"}, + {file = "numpy-1.26.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3ced40d4e9e18242f70dd02d739e44698df3dcb010d31f495ff00a31ef6014fe"}, + {file = "numpy-1.26.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b272d4cecc32c9e19911891446b72e986157e6a1809b7b56518b4f3755267523"}, + {file = "numpy-1.26.2-cp310-cp310-win32.whl", hash = "sha256:22f8fc02fdbc829e7a8c578dd8d2e15a9074b630d4da29cda483337e300e3ee9"}, + {file = "numpy-1.26.2-cp310-cp310-win_amd64.whl", hash = 
"sha256:26c9d33f8e8b846d5a65dd068c14e04018d05533b348d9eaeef6c1bd787f9919"}, + {file = "numpy-1.26.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b96e7b9c624ef3ae2ae0e04fa9b460f6b9f17ad8b4bec6d7756510f1f6c0c841"}, + {file = "numpy-1.26.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:aa18428111fb9a591d7a9cc1b48150097ba6a7e8299fb56bdf574df650e7d1f1"}, + {file = "numpy-1.26.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:06fa1ed84aa60ea6ef9f91ba57b5ed963c3729534e6e54055fc151fad0423f0a"}, + {file = "numpy-1.26.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96ca5482c3dbdd051bcd1fce8034603d6ebfc125a7bd59f55b40d8f5d246832b"}, + {file = "numpy-1.26.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:854ab91a2906ef29dc3925a064fcd365c7b4da743f84b123002f6139bcb3f8a7"}, + {file = "numpy-1.26.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f43740ab089277d403aa07567be138fc2a89d4d9892d113b76153e0e412409f8"}, + {file = "numpy-1.26.2-cp311-cp311-win32.whl", hash = "sha256:a2bbc29fcb1771cd7b7425f98b05307776a6baf43035d3b80c4b0f29e9545186"}, + {file = "numpy-1.26.2-cp311-cp311-win_amd64.whl", hash = "sha256:2b3fca8a5b00184828d12b073af4d0fc5fdd94b1632c2477526f6bd7842d700d"}, + {file = "numpy-1.26.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a4cd6ed4a339c21f1d1b0fdf13426cb3b284555c27ac2f156dfdaaa7e16bfab0"}, + {file = "numpy-1.26.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5d5244aabd6ed7f312268b9247be47343a654ebea52a60f002dc70c769048e75"}, + {file = "numpy-1.26.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a3cdb4d9c70e6b8c0814239ead47da00934666f668426fc6e94cce869e13fd7"}, + {file = "numpy-1.26.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa317b2325f7aa0a9471663e6093c210cb2ae9c0ad824732b307d2c51983d5b6"}, + {file = "numpy-1.26.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = 
"sha256:174a8880739c16c925799c018f3f55b8130c1f7c8e75ab0a6fa9d41cab092fd6"}, + {file = "numpy-1.26.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f79b231bf5c16b1f39c7f4875e1ded36abee1591e98742b05d8a0fb55d8a3eec"}, + {file = "numpy-1.26.2-cp312-cp312-win32.whl", hash = "sha256:4a06263321dfd3598cacb252f51e521a8cb4b6df471bb12a7ee5cbab20ea9167"}, + {file = "numpy-1.26.2-cp312-cp312-win_amd64.whl", hash = "sha256:b04f5dc6b3efdaab541f7857351aac359e6ae3c126e2edb376929bd3b7f92d7e"}, + {file = "numpy-1.26.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4eb8df4bf8d3d90d091e0146f6c28492b0be84da3e409ebef54349f71ed271ef"}, + {file = "numpy-1.26.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1a13860fdcd95de7cf58bd6f8bc5a5ef81c0b0625eb2c9a783948847abbef2c2"}, + {file = "numpy-1.26.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:64308ebc366a8ed63fd0bf426b6a9468060962f1a4339ab1074c228fa6ade8e3"}, + {file = "numpy-1.26.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baf8aab04a2c0e859da118f0b38617e5ee65d75b83795055fb66c0d5e9e9b818"}, + {file = "numpy-1.26.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d73a3abcac238250091b11caef9ad12413dab01669511779bc9b29261dd50210"}, + {file = "numpy-1.26.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b361d369fc7e5e1714cf827b731ca32bff8d411212fccd29ad98ad622449cc36"}, + {file = "numpy-1.26.2-cp39-cp39-win32.whl", hash = "sha256:bd3f0091e845164a20bd5a326860c840fe2af79fa12e0469a12768a3ec578d80"}, + {file = "numpy-1.26.2-cp39-cp39-win_amd64.whl", hash = "sha256:2beef57fb031dcc0dc8fa4fe297a742027b954949cabb52a2a376c144e5e6060"}, + {file = "numpy-1.26.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1cc3d5029a30fb5f06704ad6b23b35e11309491c999838c31f124fee32107c79"}, + {file = "numpy-1.26.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94cc3c222bb9fb5a12e334d0479b97bb2df446fbe622b470928f5284ffca3f8d"}, + {file = 
"numpy-1.26.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:fe6b44fb8fcdf7eda4ef4461b97b3f63c466b27ab151bec2366db8b197387841"}, + {file = "numpy-1.26.2.tar.gz", hash = "sha256:f65738447676ab5777f11e6bbbdb8ce11b785e105f690bc45966574816b6d3ea"}, +] + +[[package]] +name = "packaging" +version = "23.2" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.7" +files = [ + {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"}, + {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"}, +] + +[[package]] +name = "pbr" +version = "6.0.0" +description = "Python Build Reasonableness" +optional = false +python-versions = ">=2.6" +files = [ + {file = "pbr-6.0.0-py2.py3-none-any.whl", hash = "sha256:4a7317d5e3b17a3dccb6a8cfe67dab65b20551404c52c8ed41279fa4f0cb4cda"}, + {file = "pbr-6.0.0.tar.gz", hash = "sha256:d1377122a5a00e2f940ee482999518efe16d745d423a670c27773dfbc3c9a7d9"}, +] + +[[package]] +name = "platformdirs" +version = "4.1.0" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "platformdirs-4.1.0-py3-none-any.whl", hash = "sha256:11c8f37bcca40db96d8144522d925583bdb7a31f7b0e37e3ed4318400a8e2380"}, + {file = "platformdirs-4.1.0.tar.gz", hash = "sha256:906d548203468492d432bcb294d4bc2fff751bf84971fbb2c10918cc206ee420"}, +] + +[package.extras] +docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.1)", "sphinx-autodoc-typehints (>=1.24)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)"] + +[[package]] +name = "pluggy" +version = "1.3.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pluggy-1.3.0-py3-none-any.whl", hash = "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7"}, + {file = "pluggy-1.3.0.tar.gz", hash = "sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "pre-commit" +version = "2.21.0" +description = "A framework for managing and maintaining multi-language pre-commit hooks." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "pre_commit-2.21.0-py2.py3-none-any.whl", hash = "sha256:e2f91727039fc39a92f58a588a25b87f936de6567eed4f0e673e0507edc75bad"}, + {file = "pre_commit-2.21.0.tar.gz", hash = "sha256:31ef31af7e474a8d8995027fefdfcf509b5c913ff31f2015b4ec4beb26a6f658"}, +] + +[package.dependencies] +cfgv = ">=2.0.0" +identify = ">=1.0.0" +nodeenv = ">=0.11.1" +pyyaml = ">=5.1" +virtualenv = ">=20.10.0" + +[[package]] +name = "py" +version = "1.11.0" +description = "library with cross-python path, ini-parsing, io, code, log facilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"}, + {file = "py-1.11.0.tar.gz", hash = "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719"}, +] + +[[package]] +name = "py-cpuinfo" +version = "9.0.0" +description = "Get CPU info with pure Python" +optional = false +python-versions = "*" +files = [ + {file = "py-cpuinfo-9.0.0.tar.gz", hash = "sha256:3cdbbf3fac90dc6f118bfd64384f309edeadd902d7c8fb17f02ffa1fc3f49690"}, + {file = "py_cpuinfo-9.0.0-py3-none-any.whl", hash = "sha256:859625bc251f64e21f077d099d4162689c762b5d6a4c3c97553d56241c9674d5"}, +] + +[[package]] +name = "pydantic" +version = "2.3.0" +description = "Data validation using Python type hints" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pydantic-2.3.0-py3-none-any.whl", hash = "sha256:45b5e446c6dfaad9444819a293b921a40e1db1aa61ea08aede0522529ce90e81"}, + {file = "pydantic-2.3.0.tar.gz", hash = "sha256:1607cc106602284cd4a00882986570472f193fde9cb1259bceeaedb26aa79a6d"}, +] + +[package.dependencies] +annotated-types = ">=0.4.0" +pydantic-core = "2.6.3" +typing-extensions = ">=4.6.1" + +[package.extras] +email = ["email-validator (>=2.0.0)"] + +[[package]] +name = "pydantic-core" +version = "2.6.3" +description = "" 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "pydantic_core-2.6.3-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:1a0ddaa723c48af27d19f27f1c73bdc615c73686d763388c8683fe34ae777bad"}, + {file = "pydantic_core-2.6.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5cfde4fab34dd1e3a3f7f3db38182ab6c95e4ea91cf322242ee0be5c2f7e3d2f"}, + {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5493a7027bfc6b108e17c3383959485087d5942e87eb62bbac69829eae9bc1f7"}, + {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:84e87c16f582f5c753b7f39a71bd6647255512191be2d2dbf49458c4ef024588"}, + {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:522a9c4a4d1924facce7270c84b5134c5cabcb01513213662a2e89cf28c1d309"}, + {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aaafc776e5edc72b3cad1ccedb5fd869cc5c9a591f1213aa9eba31a781be9ac1"}, + {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a750a83b2728299ca12e003d73d1264ad0440f60f4fc9cee54acc489249b728"}, + {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9e8b374ef41ad5c461efb7a140ce4730661aadf85958b5c6a3e9cf4e040ff4bb"}, + {file = "pydantic_core-2.6.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b594b64e8568cf09ee5c9501ede37066b9fc41d83d58f55b9952e32141256acd"}, + {file = "pydantic_core-2.6.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2a20c533cb80466c1d42a43a4521669ccad7cf2967830ac62c2c2f9cece63e7e"}, + {file = "pydantic_core-2.6.3-cp310-none-win32.whl", hash = "sha256:04fe5c0a43dec39aedba0ec9579001061d4653a9b53a1366b113aca4a3c05ca7"}, + {file = "pydantic_core-2.6.3-cp310-none-win_amd64.whl", hash = "sha256:6bf7d610ac8f0065a286002a23bcce241ea8248c71988bda538edcc90e0c39ad"}, + {file = 
"pydantic_core-2.6.3-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:6bcc1ad776fffe25ea5c187a028991c031a00ff92d012ca1cc4714087e575973"}, + {file = "pydantic_core-2.6.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:df14f6332834444b4a37685810216cc8fe1fe91f447332cd56294c984ecbff1c"}, + {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0b7486d85293f7f0bbc39b34e1d8aa26210b450bbd3d245ec3d732864009819"}, + {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a892b5b1871b301ce20d40b037ffbe33d1407a39639c2b05356acfef5536d26a"}, + {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:883daa467865e5766931e07eb20f3e8152324f0adf52658f4d302242c12e2c32"}, + {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d4eb77df2964b64ba190eee00b2312a1fd7a862af8918ec70fc2d6308f76ac64"}, + {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ce8c84051fa292a5dc54018a40e2a1926fd17980a9422c973e3ebea017aa8da"}, + {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:22134a4453bd59b7d1e895c455fe277af9d9d9fbbcb9dc3f4a97b8693e7e2c9b"}, + {file = "pydantic_core-2.6.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:02e1c385095efbd997311d85c6021d32369675c09bcbfff3b69d84e59dc103f6"}, + {file = "pydantic_core-2.6.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d79f1f2f7ebdb9b741296b69049ff44aedd95976bfee38eb4848820628a99b50"}, + {file = "pydantic_core-2.6.3-cp311-none-win32.whl", hash = "sha256:430ddd965ffd068dd70ef4e4d74f2c489c3a313adc28e829dd7262cc0d2dd1e8"}, + {file = "pydantic_core-2.6.3-cp311-none-win_amd64.whl", hash = "sha256:84f8bb34fe76c68c9d96b77c60cef093f5e660ef8e43a6cbfcd991017d375950"}, + {file = "pydantic_core-2.6.3-cp311-none-win_arm64.whl", hash = 
"sha256:5a2a3c9ef904dcdadb550eedf3291ec3f229431b0084666e2c2aa8ff99a103a2"}, + {file = "pydantic_core-2.6.3-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:8421cf496e746cf8d6b677502ed9a0d1e4e956586cd8b221e1312e0841c002d5"}, + {file = "pydantic_core-2.6.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bb128c30cf1df0ab78166ded1ecf876620fb9aac84d2413e8ea1594b588c735d"}, + {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37a822f630712817b6ecc09ccc378192ef5ff12e2c9bae97eb5968a6cdf3b862"}, + {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:240a015102a0c0cc8114f1cba6444499a8a4d0333e178bc504a5c2196defd456"}, + {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f90e5e3afb11268628c89f378f7a1ea3f2fe502a28af4192e30a6cdea1e7d5e"}, + {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:340e96c08de1069f3d022a85c2a8c63529fd88709468373b418f4cf2c949fb0e"}, + {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1480fa4682e8202b560dcdc9eeec1005f62a15742b813c88cdc01d44e85308e5"}, + {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f14546403c2a1d11a130b537dda28f07eb6c1805a43dae4617448074fd49c282"}, + {file = "pydantic_core-2.6.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a87c54e72aa2ef30189dc74427421e074ab4561cf2bf314589f6af5b37f45e6d"}, + {file = "pydantic_core-2.6.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f93255b3e4d64785554e544c1c76cd32f4a354fa79e2eeca5d16ac2e7fdd57aa"}, + {file = "pydantic_core-2.6.3-cp312-none-win32.whl", hash = "sha256:f70dc00a91311a1aea124e5f64569ea44c011b58433981313202c46bccbec0e1"}, + {file = "pydantic_core-2.6.3-cp312-none-win_amd64.whl", hash = 
"sha256:23470a23614c701b37252618e7851e595060a96a23016f9a084f3f92f5ed5881"}, + {file = "pydantic_core-2.6.3-cp312-none-win_arm64.whl", hash = "sha256:1ac1750df1b4339b543531ce793b8fd5c16660a95d13aecaab26b44ce11775e9"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:a53e3195f134bde03620d87a7e2b2f2046e0e5a8195e66d0f244d6d5b2f6d31b"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:f2969e8f72c6236c51f91fbb79c33821d12a811e2a94b7aa59c65f8dbdfad34a"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:672174480a85386dd2e681cadd7d951471ad0bb028ed744c895f11f9d51b9ebe"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:002d0ea50e17ed982c2d65b480bd975fc41086a5a2f9c924ef8fc54419d1dea3"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3ccc13afee44b9006a73d2046068d4df96dc5b333bf3509d9a06d1b42db6d8bf"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:439a0de139556745ae53f9cc9668c6c2053444af940d3ef3ecad95b079bc9987"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d63b7545d489422d417a0cae6f9898618669608750fc5e62156957e609e728a5"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b44c42edc07a50a081672e25dfe6022554b47f91e793066a7b601ca290f71e42"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1c721bfc575d57305dd922e6a40a8fe3f762905851d694245807a351ad255c58"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:5e4a2cf8c4543f37f5dc881de6c190de08096c53986381daebb56a355be5dfe6"}, + {file = "pydantic_core-2.6.3-cp37-none-win32.whl", hash = "sha256:d9b4916b21931b08096efed090327f8fe78e09ae8f5ad44e07f5c72a7eedb51b"}, + 
{file = "pydantic_core-2.6.3-cp37-none-win_amd64.whl", hash = "sha256:a8acc9dedd304da161eb071cc7ff1326aa5b66aadec9622b2574ad3ffe225525"}, + {file = "pydantic_core-2.6.3-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:5e9c068f36b9f396399d43bfb6defd4cc99c36215f6ff33ac8b9c14ba15bdf6b"}, + {file = "pydantic_core-2.6.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e61eae9b31799c32c5f9b7be906be3380e699e74b2db26c227c50a5fc7988698"}, + {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d85463560c67fc65cd86153a4975d0b720b6d7725cf7ee0b2d291288433fc21b"}, + {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9616567800bdc83ce136e5847d41008a1d602213d024207b0ff6cab6753fe645"}, + {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9e9b65a55bbabda7fccd3500192a79f6e474d8d36e78d1685496aad5f9dbd92c"}, + {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f468d520f47807d1eb5d27648393519655eadc578d5dd862d06873cce04c4d1b"}, + {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9680dd23055dd874173a3a63a44e7f5a13885a4cfd7e84814be71be24fba83db"}, + {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a718d56c4d55efcfc63f680f207c9f19c8376e5a8a67773535e6f7e80e93170"}, + {file = "pydantic_core-2.6.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8ecbac050856eb6c3046dea655b39216597e373aa8e50e134c0e202f9c47efec"}, + {file = "pydantic_core-2.6.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:788be9844a6e5c4612b74512a76b2153f1877cd845410d756841f6c3420230eb"}, + {file = "pydantic_core-2.6.3-cp38-none-win32.whl", hash = "sha256:07a1aec07333bf5adebd8264047d3dc518563d92aca6f2f5b36f505132399efc"}, + {file = "pydantic_core-2.6.3-cp38-none-win_amd64.whl", hash = 
"sha256:621afe25cc2b3c4ba05fff53525156d5100eb35c6e5a7cf31d66cc9e1963e378"}, + {file = "pydantic_core-2.6.3-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:813aab5bfb19c98ae370952b6f7190f1e28e565909bfc219a0909db168783465"}, + {file = "pydantic_core-2.6.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:50555ba3cb58f9861b7a48c493636b996a617db1a72c18da4d7f16d7b1b9952b"}, + {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19e20f8baedd7d987bd3f8005c146e6bcbda7cdeefc36fad50c66adb2dd2da48"}, + {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b0a5d7edb76c1c57b95df719af703e796fc8e796447a1da939f97bfa8a918d60"}, + {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f06e21ad0b504658a3a9edd3d8530e8cea5723f6ea5d280e8db8efc625b47e49"}, + {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ea053cefa008fda40f92aab937fb9f183cf8752e41dbc7bc68917884454c6362"}, + {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:171a4718860790f66d6c2eda1d95dd1edf64f864d2e9f9115840840cf5b5713f"}, + {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5ed7ceca6aba5331ece96c0e328cd52f0dcf942b8895a1ed2642de50800b79d3"}, + {file = "pydantic_core-2.6.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:acafc4368b289a9f291e204d2c4c75908557d4f36bd3ae937914d4529bf62a76"}, + {file = "pydantic_core-2.6.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1aa712ba150d5105814e53cb141412217146fedc22621e9acff9236d77d2a5ef"}, + {file = "pydantic_core-2.6.3-cp39-none-win32.whl", hash = "sha256:44b4f937b992394a2e81a5c5ce716f3dcc1237281e81b80c748b2da6dd5cf29a"}, + {file = "pydantic_core-2.6.3-cp39-none-win_amd64.whl", hash = "sha256:9b33bf9658cb29ac1a517c11e865112316d09687d767d7a0e4a63d5c640d1b17"}, + {file = 
"pydantic_core-2.6.3-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:d7050899026e708fb185e174c63ebc2c4ee7a0c17b0a96ebc50e1f76a231c057"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:99faba727727b2e59129c59542284efebbddade4f0ae6a29c8b8d3e1f437beb7"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fa159b902d22b283b680ef52b532b29554ea2a7fc39bf354064751369e9dbd7"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:046af9cfb5384f3684eeb3f58a48698ddab8dd870b4b3f67f825353a14441418"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:930bfe73e665ebce3f0da2c6d64455098aaa67e1a00323c74dc752627879fc67"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:85cc4d105747d2aa3c5cf3e37dac50141bff779545ba59a095f4a96b0a460e70"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b25afe9d5c4f60dcbbe2b277a79be114e2e65a16598db8abee2a2dcde24f162b"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:e49ce7dc9f925e1fb010fc3d555250139df61fa6e5a0a95ce356329602c11ea9"}, + {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-macosx_10_7_x86_64.whl", hash = "sha256:2dd50d6a1aef0426a1d0199190c6c43ec89812b1f409e7fe44cb0fbf6dfa733c"}, + {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6595b0d8c8711e8e1dc389d52648b923b809f68ac1c6f0baa525c6440aa0daa"}, + {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ef724a059396751aef71e847178d66ad7fc3fc969a1a40c29f5aac1aa5f8784"}, + {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:3c8945a105f1589ce8a693753b908815e0748f6279959a4530f6742e1994dcb6"}, + {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:c8c6660089a25d45333cb9db56bb9e347241a6d7509838dbbd1931d0e19dbc7f"}, + {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:692b4ff5c4e828a38716cfa92667661a39886e71136c97b7dac26edef18767f7"}, + {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:f1a5d8f18877474c80b7711d870db0eeef9442691fcdb00adabfc97e183ee0b0"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:3796a6152c545339d3b1652183e786df648ecdf7c4f9347e1d30e6750907f5bb"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:b962700962f6e7a6bd77e5f37320cabac24b4c0f76afeac05e9f93cf0c620014"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56ea80269077003eaa59723bac1d8bacd2cd15ae30456f2890811efc1e3d4413"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75c0ebbebae71ed1e385f7dfd9b74c1cff09fed24a6df43d326dd7f12339ec34"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:252851b38bad3bfda47b104ffd077d4f9604a10cb06fe09d020016a25107bf98"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:6656a0ae383d8cd7cc94e91de4e526407b3726049ce8d7939049cbfa426518c8"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:d9140ded382a5b04a1c030b593ed9bf3088243a0a8b7fa9f071a5736498c5483"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:d38bbcef58220f9c81e42c255ef0bf99735d8f11edef69ab0b499da77105158a"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = 
"sha256:c9d469204abcca28926cbc28ce98f28e50e488767b084fb3fbdf21af11d3de26"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:48c1ed8b02ffea4d5c9c220eda27af02b8149fe58526359b3c07eb391cb353a2"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b2b1bfed698fa410ab81982f681f5b1996d3d994ae8073286515ac4d165c2e7"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf9d42a71a4d7a7c1f14f629e5c30eac451a6fc81827d2beefd57d014c006c4a"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4292ca56751aebbe63a84bbfc3b5717abb09b14d4b4442cc43fd7c49a1529efd"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:7dc2ce039c7290b4ef64334ec7e6ca6494de6eecc81e21cb4f73b9b39991408c"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:615a31b1629e12445c0e9fc8339b41aaa6cc60bd53bf802d5fe3d2c0cda2ae8d"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:1fa1f6312fb84e8c281f32b39affe81984ccd484da6e9d65b3d18c202c666149"}, + {file = "pydantic_core-2.6.3.tar.gz", hash = "sha256:1508f37ba9e3ddc0189e6ff4e2228bd2d3c3a4641cbe8c07177162f76ed696c7"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" + +[[package]] +name = "pydoclint" +version = "0.3.8" +description = "A Python docstring linter that checks arguments, returns, yields, and raises sections" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydoclint-0.3.8-py2.py3-none-any.whl", hash = "sha256:8e5e020071bb64056fd3f1d68f3b1162ffeb8a3fd6424f73fef7272dac62c166"}, + {file = "pydoclint-0.3.8.tar.gz", hash = "sha256:5a9686a5fb410343e998402686b87cc07df647ea3ab92528c0b0cf8505584e44"}, +] + +[package.dependencies] +click = ">=8.0.0" +docstring-parser-fork = ">=0.0.5" +tomli = {version = 
">=2.0.1", markers = "python_version < \"3.11\""} + +[package.extras] +flake8 = ["flake8 (>=4)"] + +[[package]] +name = "pydocstyle" +version = "6.3.0" +description = "Python docstring style checker" +optional = false +python-versions = ">=3.6" +files = [ + {file = "pydocstyle-6.3.0-py3-none-any.whl", hash = "sha256:118762d452a49d6b05e194ef344a55822987a462831ade91ec5c06fd2169d019"}, + {file = "pydocstyle-6.3.0.tar.gz", hash = "sha256:7ce43f0c0ac87b07494eb9c0b462c0b73e6ff276807f204d6b53edc72b7e44e1"}, +] + +[package.dependencies] +snowballstemmer = ">=2.2.0" +tomli = {version = ">=1.2.3", optional = true, markers = "python_version < \"3.11\" and extra == \"toml\""} + +[package.extras] +toml = ["tomli (>=1.2.3)"] + +[[package]] +name = "pygments" +version = "2.17.2" +description = "Pygments is a syntax highlighting package written in Python." +optional = false +python-versions = ">=3.7" +files = [ + {file = "pygments-2.17.2-py3-none-any.whl", hash = "sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c"}, + {file = "pygments-2.17.2.tar.gz", hash = "sha256:da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367"}, +] + +[package.extras] +plugins = ["importlib-metadata"] +windows-terminal = ["colorama (>=0.4.6)"] + +[[package]] +name = "pytest" +version = "7.4.3" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest-7.4.3-py3-none-any.whl", hash = "sha256:0d009c083ea859a71b76adf7c1d502e4bc170b80a8ef002da5806527b9591fac"}, + {file = "pytest-7.4.3.tar.gz", hash = "sha256:d989d136982de4e3b29dabcc838ad581c64e8ed52c11fbe86ddebd9da0818cd5"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=0.12,<2.0" +tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} + +[package.extras] 
+testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "pytest-benchmark" +version = "4.0.0" +description = "A ``pytest`` fixture for benchmarking code. It will group the tests into rounds that are calibrated to the chosen timer." +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest-benchmark-4.0.0.tar.gz", hash = "sha256:fb0785b83efe599a6a956361c0691ae1dbb5318018561af10f3e915caa0048d1"}, + {file = "pytest_benchmark-4.0.0-py3-none-any.whl", hash = "sha256:fdb7db64e31c8b277dff9850d2a2556d8b60bcb0ea6524e36e28ffd7c87f71d6"}, +] + +[package.dependencies] +py-cpuinfo = "*" +pytest = ">=3.8" + +[package.extras] +aspect = ["aspectlib"] +elasticsearch = ["elasticsearch"] +histogram = ["pygal", "pygaljs"] + +[[package]] +name = "pytest-click" +version = "1.1.0" +description = "Pytest plugin for Click" +optional = false +python-versions = "*" +files = [ + {file = "pytest_click-1.1.0-py3-none-any.whl", hash = "sha256:eade4742c2f02c345e78a32534a43e8db04acf98d415090539dacc880b7cd0e9"}, + {file = "pytest_click-1.1.0.tar.gz", hash = "sha256:fdd9f6721f877dda021e7c5dc73e70aecd37e5ed23ec6820f8a7b3fd7b4f8d30"}, +] + +[package.dependencies] +click = ">=6.0" +pytest = ">=5.0" + +[[package]] +name = "pytest-cov" +version = "4.1.0" +description = "Pytest plugin for measuring coverage." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest-cov-4.1.0.tar.gz", hash = "sha256:3904b13dfbfec47f003b8e77fd5b589cd11904a21ddf1ab38a64f204d6a10ef6"}, + {file = "pytest_cov-4.1.0-py3-none-any.whl", hash = "sha256:6ba70b9e97e69fcc3fb45bfeab2d0a138fb65c4d0d6a41ef33983ad114be8c3a"}, +] + +[package.dependencies] +coverage = {version = ">=5.2.1", extras = ["toml"]} +pytest = ">=4.6" + +[package.extras] +testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtualenv"] + +[[package]] +name = "pytest-html" +version = "3.2.0" +description = "pytest plugin for generating HTML reports" +optional = false +python-versions = ">=3.6" +files = [ + {file = "pytest-html-3.2.0.tar.gz", hash = "sha256:c4e2f4bb0bffc437f51ad2174a8a3e71df81bbc2f6894604e604af18fbe687c3"}, + {file = "pytest_html-3.2.0-py3-none-any.whl", hash = "sha256:868c08564a68d8b2c26866f1e33178419bb35b1e127c33784a28622eb827f3f3"}, +] + +[package.dependencies] +py = ">=1.8.2" +pytest = ">=5.0,<6.0.0 || >6.0.0" +pytest-metadata = "*" + +[[package]] +name = "pytest-metadata" +version = "3.0.0" +description = "pytest plugin for test session metadata" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest_metadata-3.0.0-py3-none-any.whl", hash = "sha256:a17b1e40080401dc23177599208c52228df463db191c1a573ccdffacd885e190"}, + {file = "pytest_metadata-3.0.0.tar.gz", hash = "sha256:769a9c65d2884bd583bc626b0ace77ad15dbe02dd91a9106d47fd46d9c2569ca"}, +] + +[package.dependencies] +pytest = ">=7.0.0" + +[package.extras] +test = ["black (>=22.1.0)", "flake8 (>=4.0.1)", "pre-commit (>=2.17.0)", "tox (>=3.24.5)"] + +[[package]] +name = "pytest-mock" +version = "3.12.0" +description = "Thin-wrapper around the mock package for easier use with pytest" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest-mock-3.12.0.tar.gz", hash = "sha256:31a40f038c22cad32287bb43932054451ff5583ff094bca6f675df2f8bc1a6e9"}, + {file = "pytest_mock-3.12.0-py3-none-any.whl", 
hash = "sha256:0972719a7263072da3a21c7f4773069bcc7486027d7e8e1f81d98a47e701bc4f"}, +] + +[package.dependencies] +pytest = ">=5.0" + +[package.extras] +dev = ["pre-commit", "pytest-asyncio", "tox"] + +[[package]] +name = "pytest-pikachu" +version = "1.0.0" +description = "Show surprise when tests are passing" +optional = false +python-versions = "*" +files = [ + {file = "pytest-pikachu-1.0.0.tar.gz", hash = "sha256:8acd13fdc51491e86aff5106cfaa31f80f4584ac41dcc3ae512d471c18333fd7"}, + {file = "pytest_pikachu-1.0.0-py3-none-any.whl", hash = "sha256:c20cfe20a84978e11e69af24f7a9d07beb90cbca805ae5011e2061c14a486eb6"}, +] + +[package.dependencies] +pytest = "*" + +[[package]] +name = "pytest-sugar" +version = "0.9.7" +description = "pytest-sugar is a plugin for pytest that changes the default look and feel of pytest (e.g. progressbar, show tests that fail instantly)." +optional = false +python-versions = "*" +files = [ + {file = "pytest-sugar-0.9.7.tar.gz", hash = "sha256:f1e74c1abfa55f7241cf7088032b6e378566f16b938f3f08905e2cf4494edd46"}, + {file = "pytest_sugar-0.9.7-py2.py3-none-any.whl", hash = "sha256:8cb5a4e5f8bbcd834622b0235db9e50432f4cbd71fef55b467fe44e43701e062"}, +] + +[package.dependencies] +packaging = ">=21.3" +pytest = ">=6.2.0" +termcolor = ">=2.1.0" + +[package.extras] +dev = ["black", "flake8", "pre-commit"] + +[[package]] +name = "pytest-timeout" +version = "2.2.0" +description = "pytest plugin to abort hanging tests" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest-timeout-2.2.0.tar.gz", hash = "sha256:3b0b95dabf3cb50bac9ef5ca912fa0cfc286526af17afc806824df20c2f72c90"}, + {file = "pytest_timeout-2.2.0-py3-none-any.whl", hash = "sha256:bde531e096466f49398a59f2dde76fa78429a09a12411466f88a07213e220de2"}, +] + +[package.dependencies] +pytest = ">=5.0.0" + +[[package]] +name = "pyyaml" +version = "6.0.1" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.6" +files = [ + {file = 
"PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, + {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, + {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, + {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, + {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, + {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = 
"PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, + {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, + {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, + {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, + {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, + {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, + {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = 
"sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, + {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, + {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, + {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = 
"sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, + {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, + {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, + {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, + {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, +] + +[[package]] +name = "requests" +version = "2.31.0" +description = "Python HTTP for Humans." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, + {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "rich" +version = "12.6.0" +description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" +optional = false +python-versions = ">=3.6.3,<4.0.0" +files = [ + {file = "rich-12.6.0-py3-none-any.whl", hash = "sha256:a4eb26484f2c82589bd9a17c73d32a010b1e29d89f1604cd9bf3a2097b81bb5e"}, + {file = "rich-12.6.0.tar.gz", hash = "sha256:ba3a3775974105c221d31141f2c116f4fd65c5ceb0698657a11e9f295ec93fd0"}, +] + +[package.dependencies] +commonmark = ">=0.9.0,<0.10.0" +pygments = ">=2.6.0,<3.0.0" + +[package.extras] +jupyter = ["ipywidgets (>=7.5.1,<8.0.0)"] + +[[package]] +name = "ruamel-yaml" +version = "0.18.5" +description = "ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order" +optional = false +python-versions = ">=3.7" +files = [ + {file = "ruamel.yaml-0.18.5-py3-none-any.whl", hash = "sha256:a013ac02f99a69cdd6277d9664689eb1acba07069f912823177c5eced21a6ada"}, + {file = "ruamel.yaml-0.18.5.tar.gz", hash = "sha256:61917e3a35a569c1133a8f772e1226961bf5a1198bea7e23f06a0841dea1ab0e"}, +] + +[package.dependencies] +"ruamel.yaml.clib" = {version = ">=0.2.7", markers = "platform_python_implementation == \"CPython\" and python_version < \"3.13\""} + +[package.extras] +docs = ["mercurial (>5.7)", "ryd"] +jinja2 = ["ruamel.yaml.jinja2 (>=0.2)"] + +[[package]] +name = "ruamel-yaml-clib" +version = "0.2.8" 
+description = "C version of reader, parser and emitter for ruamel.yaml derived from libyaml" +optional = false +python-versions = ">=3.6" +files = [ + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b42169467c42b692c19cf539c38d4602069d8c1505e97b86387fcf7afb766e1d"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-macosx_13_0_arm64.whl", hash = "sha256:07238db9cbdf8fc1e9de2489a4f68474e70dffcb32232db7c08fa61ca0c7c462"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:fff3573c2db359f091e1589c3d7c5fc2f86f5bdb6f24252c2d8e539d4e45f412"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux_2_24_aarch64.whl", hash = "sha256:aa2267c6a303eb483de8d02db2871afb5c5fc15618d894300b88958f729ad74f"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:840f0c7f194986a63d2c2465ca63af8ccbbc90ab1c6001b1978f05119b5e7334"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:024cfe1fc7c7f4e1aff4a81e718109e13409767e4f871443cbff3dba3578203d"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-win32.whl", hash = "sha256:c69212f63169ec1cfc9bb44723bf2917cbbd8f6191a00ef3410f5a7fe300722d"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-win_amd64.whl", hash = "sha256:cabddb8d8ead485e255fe80429f833172b4cadf99274db39abc080e068cbcc31"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:bef08cd86169d9eafb3ccb0a39edb11d8e25f3dae2b28f5c52fd997521133069"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:b16420e621d26fdfa949a8b4b47ade8810c56002f5389970db4ddda51dbff248"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:25c515e350e5b739842fc3228d662413ef28f295791af5e5110b543cf0b57d9b"}, + {file = 
"ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux_2_24_aarch64.whl", hash = "sha256:1707814f0d9791df063f8c19bb51b0d1278b8e9a2353abbb676c2f685dee6afe"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:46d378daaac94f454b3a0e3d8d78cafd78a026b1d71443f4966c696b48a6d899"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:09b055c05697b38ecacb7ac50bdab2240bfca1a0c4872b0fd309bb07dc9aa3a9"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-win32.whl", hash = "sha256:53a300ed9cea38cf5a2a9b069058137c2ca1ce658a874b79baceb8f892f915a7"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-win_amd64.whl", hash = "sha256:c2a72e9109ea74e511e29032f3b670835f8a59bbdc9ce692c5b4ed91ccf1eedb"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:ebc06178e8821efc9692ea7544aa5644217358490145629914d8020042c24aa1"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-macosx_13_0_arm64.whl", hash = "sha256:edaef1c1200c4b4cb914583150dcaa3bc30e592e907c01117c08b13a07255ec2"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d176b57452ab5b7028ac47e7b3cf644bcfdc8cacfecf7e71759f7f51a59e5c92"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux_2_24_aarch64.whl", hash = "sha256:1dc67314e7e1086c9fdf2680b7b6c2be1c0d8e3a8279f2e993ca2a7545fecf62"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3213ece08ea033eb159ac52ae052a4899b56ecc124bb80020d9bbceeb50258e9"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:aab7fd643f71d7946f2ee58cc88c9b7bfc97debd71dcc93e03e2d174628e7e2d"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-win32.whl", hash = "sha256:5c365d91c88390c8d0a8545df0b5857172824b1c604e867161e6b3d59a827eaa"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-win_amd64.whl", hash = 
"sha256:1758ce7d8e1a29d23de54a16ae867abd370f01b5a69e1a3ba75223eaa3ca1a1b"}, + {file = "ruamel.yaml.clib-0.2.8-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a5aa27bad2bb83670b71683aae140a1f52b0857a2deff56ad3f6c13a017a26ed"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c58ecd827313af6864893e7af0a3bb85fd529f862b6adbefe14643947cfe2942"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-macosx_12_0_arm64.whl", hash = "sha256:f481f16baec5290e45aebdc2a5168ebc6d35189ae6fea7a58787613a25f6e875"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux_2_24_aarch64.whl", hash = "sha256:77159f5d5b5c14f7c34073862a6b7d34944075d9f93e681638f6d753606c6ce6"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:7f67a1ee819dc4562d444bbafb135832b0b909f81cc90f7aa00260968c9ca1b3"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:4ecbf9c3e19f9562c7fdd462e8d18dd902a47ca046a2e64dba80699f0b6c09b7"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:87ea5ff66d8064301a154b3933ae406b0863402a799b16e4a1d24d9fbbcbe0d3"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-win32.whl", hash = "sha256:75e1ed13e1f9de23c5607fe6bd1aeaae21e523b32d83bb33918245361e9cc51b"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-win_amd64.whl", hash = "sha256:3f215c5daf6a9d7bbed4a0a4f760f3113b10e82ff4c5c44bec20a68c8014f675"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1b617618914cb00bf5c34d4357c37aa15183fa229b24767259657746c9077615"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:a6a9ffd280b71ad062eae53ac1659ad86a17f59a0fdc7699fd9be40525153337"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux_2_24_aarch64.whl", hash = "sha256:305889baa4043a09e5b76f8e2a51d4ffba44259f6b4c72dec8ca56207d9c6fe1"}, + {file = 
"ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:700e4ebb569e59e16a976857c8798aee258dceac7c7d6b50cab63e080058df91"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e2b4c44b60eadec492926a7270abb100ef9f72798e18743939bdbf037aab8c28"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e79e5db08739731b0ce4850bed599235d601701d5694c36570a99a0c5ca41a9d"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-win32.whl", hash = "sha256:955eae71ac26c1ab35924203fda6220f84dce57d6d7884f189743e2abe3a9fbe"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-win_amd64.whl", hash = "sha256:56f4252222c067b4ce51ae12cbac231bce32aee1d33fbfc9d17e5b8d6966c312"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:03d1162b6d1df1caa3a4bd27aa51ce17c9afc2046c31b0ad60a0a96ec22f8001"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:bba64af9fa9cebe325a62fa398760f5c7206b215201b0ec825005f1b18b9bccf"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux_2_24_aarch64.whl", hash = "sha256:a1a45e0bb052edf6a1d3a93baef85319733a888363938e1fc9924cb00c8df24c"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:da09ad1c359a728e112d60116f626cc9f29730ff3e0e7db72b9a2dbc2e4beed5"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:184565012b60405d93838167f425713180b949e9d8dd0bbc7b49f074407c5a8b"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a75879bacf2c987c003368cf14bed0ffe99e8e85acfa6c0bfffc21a090f16880"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-win32.whl", hash = "sha256:84b554931e932c46f94ab306913ad7e11bba988104c5cff26d90d03f68258cd5"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-win_amd64.whl", hash = "sha256:25ac8c08322002b06fa1d49d1646181f0b2c72f5cbc15a85e80b4c30a544bb15"}, + {file = 
"ruamel.yaml.clib-0.2.8.tar.gz", hash = "sha256:beb2e0404003de9a4cab9753a8805a8fe9320ee6673136ed7f04255fe60bb512"}, +] + +[[package]] +name = "ruff" +version = "0.1.7" +description = "An extremely fast Python linter and code formatter, written in Rust." +optional = false +python-versions = ">=3.7" +files = [ + {file = "ruff-0.1.7-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:7f80496854fdc65b6659c271d2c26e90d4d401e6a4a31908e7e334fab4645aac"}, + {file = "ruff-0.1.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:1ea109bdb23c2a4413f397ebd8ac32cb498bee234d4191ae1a310af760e5d287"}, + {file = "ruff-0.1.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b0c2de9dd9daf5e07624c24add25c3a490dbf74b0e9bca4145c632457b3b42a"}, + {file = "ruff-0.1.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:69a4bed13bc1d5dabf3902522b5a2aadfebe28226c6269694283c3b0cecb45fd"}, + {file = "ruff-0.1.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:de02ca331f2143195a712983a57137c5ec0f10acc4aa81f7c1f86519e52b92a1"}, + {file = "ruff-0.1.7-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:45b38c3f8788a65e6a2cab02e0f7adfa88872696839d9882c13b7e2f35d64c5f"}, + {file = "ruff-0.1.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6c64cb67b2025b1ac6d58e5ffca8f7b3f7fd921f35e78198411237e4f0db8e73"}, + {file = "ruff-0.1.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9dcc6bb2f4df59cb5b4b40ff14be7d57012179d69c6565c1da0d1f013d29951b"}, + {file = "ruff-0.1.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df2bb4bb6bbe921f6b4f5b6fdd8d8468c940731cb9406f274ae8c5ed7a78c478"}, + {file = "ruff-0.1.7-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:276a89bcb149b3d8c1b11d91aa81898fe698900ed553a08129b38d9d6570e717"}, + {file = "ruff-0.1.7-py3-none-musllinux_1_2_armv7l.whl", hash = 
"sha256:90c958fe950735041f1c80d21b42184f1072cc3975d05e736e8d66fc377119ea"}, + {file = "ruff-0.1.7-py3-none-musllinux_1_2_i686.whl", hash = "sha256:6b05e3b123f93bb4146a761b7a7d57af8cb7384ccb2502d29d736eaade0db519"}, + {file = "ruff-0.1.7-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:290ecab680dce94affebefe0bbca2322a6277e83d4f29234627e0f8f6b4fa9ce"}, + {file = "ruff-0.1.7-py3-none-win32.whl", hash = "sha256:416dfd0bd45d1a2baa3b1b07b1b9758e7d993c256d3e51dc6e03a5e7901c7d80"}, + {file = "ruff-0.1.7-py3-none-win_amd64.whl", hash = "sha256:4af95fd1d3b001fc41325064336db36e3d27d2004cdb6d21fd617d45a172dd96"}, + {file = "ruff-0.1.7-py3-none-win_arm64.whl", hash = "sha256:0683b7bfbb95e6df3c7c04fe9d78f631f8e8ba4868dfc932d43d690698057e2e"}, + {file = "ruff-0.1.7.tar.gz", hash = "sha256:dffd699d07abf54833e5f6cc50b85a6ff043715da8788c4a79bcd4ab4734d306"}, +] + +[[package]] +name = "safety" +version = "2.3.4" +description = "Checks installed dependencies for known vulnerabilities and licenses." +optional = false +python-versions = "*" +files = [ + {file = "safety-2.3.4-py3-none-any.whl", hash = "sha256:6224dcd9b20986a2b2c5e7acfdfba6bca42bb11b2783b24ed04f32317e5167ea"}, + {file = "safety-2.3.4.tar.gz", hash = "sha256:b9e74e794e82f54d11f4091c5d820c4d2d81de9f953bf0b4f33ac8bc402ae72c"}, +] + +[package.dependencies] +Click = ">=8.0.2" +dparse = ">=0.6.2" +packaging = ">=21.0" +requests = "*" +"ruamel.yaml" = ">=0.17.21" +setuptools = ">=19.3" + +[package.extras] +github = ["jinja2 (>=3.1.0)", "pygithub (>=1.43.3)"] +gitlab = ["python-gitlab (>=1.3.0)"] + +[[package]] +name = "setuptools" +version = "69.0.2" +description = "Easily download, build, install, upgrade, and uninstall Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "setuptools-69.0.2-py3-none-any.whl", hash = "sha256:1e8fdff6797d3865f37397be788a4e3cba233608e9b509382a2777d25ebde7f2"}, + {file = "setuptools-69.0.2.tar.gz", hash = 
"sha256:735896e78a4742605974de002ac60562d286fa8051a7e2299445e8e8fbb01aa6"}, +] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.1)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] + +[[package]] +name = "shellingham" +version = "1.5.4" +description = "Tool to Detect Surrounding Shell" +optional = false +python-versions = ">=3.7" +files = [ + {file = "shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686"}, + {file = "shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de"}, +] + +[[package]] +name = "smmap" +version = "5.0.1" +description = "A pure Python implementation of a sliding window memory map manager" +optional = false +python-versions = ">=3.7" +files = [ + {file = "smmap-5.0.1-py3-none-any.whl", hash = "sha256:e6d8668fa5f93e706934a62d7b4db19c8d9eb8cf2adbb75ef1b675aa332b69da"}, + {file = "smmap-5.0.1.tar.gz", hash = "sha256:dceeb6c0028fdb6734471eb07c0cd2aae706ccaecab45965ee83f11c8d3b1f62"}, +] + +[[package]] +name = "snowballstemmer" +version = "2.2.0" 
+description = "This package provides 29 stemmers for 28 languages generated from Snowball algorithms." +optional = false +python-versions = "*" +files = [ + {file = "snowballstemmer-2.2.0-py2.py3-none-any.whl", hash = "sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a"}, + {file = "snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1"}, +] + +[[package]] +name = "stevedore" +version = "5.1.0" +description = "Manage dynamic plugins for Python applications" +optional = false +python-versions = ">=3.8" +files = [ + {file = "stevedore-5.1.0-py3-none-any.whl", hash = "sha256:8cc040628f3cea5d7128f2e76cf486b2251a4e543c7b938f58d9a377f6694a2d"}, + {file = "stevedore-5.1.0.tar.gz", hash = "sha256:a54534acf9b89bc7ed264807013b505bf07f74dbe4bcfa37d32bd063870b087c"}, +] + +[package.dependencies] +pbr = ">=2.0.0,<2.1.0 || >2.1.0" + +[[package]] +name = "termcolor" +version = "2.4.0" +description = "ANSI color formatting for output in terminal" +optional = false +python-versions = ">=3.8" +files = [ + {file = "termcolor-2.4.0-py3-none-any.whl", hash = "sha256:9297c0df9c99445c2412e832e882a7884038a25617c60cea2ad69488d4040d63"}, + {file = "termcolor-2.4.0.tar.gz", hash = "sha256:aab9e56047c8ac41ed798fa36d892a37aca6b3e9159f3e0c24bc64a9b3ac7b7a"}, +] + +[package.extras] +tests = ["pytest", "pytest-cov"] + +[[package]] +name = "tomli" +version = "2.0.1" +description = "A lil' TOML parser" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, +] + +[[package]] +name = "typer" +version = "0.7.0" +description = "Typer, build great CLIs. Easy to code. Based on Python type hints." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "typer-0.7.0-py3-none-any.whl", hash = "sha256:b5e704f4e48ec263de1c0b3a2387cd405a13767d2f907f44c1a08cbad96f606d"}, + {file = "typer-0.7.0.tar.gz", hash = "sha256:ff797846578a9f2a201b53442aedeb543319466870fbe1c701eab66dd7681165"}, +] + +[package.dependencies] +click = ">=7.1.1,<9.0.0" +colorama = {version = ">=0.4.3,<0.5.0", optional = true, markers = "extra == \"all\""} +rich = {version = ">=10.11.0,<13.0.0", optional = true, markers = "extra == \"all\""} +shellingham = {version = ">=1.3.0,<2.0.0", optional = true, markers = "extra == \"all\""} + +[package.extras] +all = ["colorama (>=0.4.3,<0.5.0)", "rich (>=10.11.0,<13.0.0)", "shellingham (>=1.3.0,<2.0.0)"] +dev = ["autoflake (>=1.3.1,<2.0.0)", "flake8 (>=3.8.3,<4.0.0)", "pre-commit (>=2.17.0,<3.0.0)"] +doc = ["cairosvg (>=2.5.2,<3.0.0)", "mdx-include (>=1.4.1,<2.0.0)", "mkdocs (>=1.1.2,<2.0.0)", "mkdocs-material (>=8.1.4,<9.0.0)", "pillow (>=9.3.0,<10.0.0)"] +test = ["black (>=22.3.0,<23.0.0)", "coverage (>=6.2,<7.0)", "isort (>=5.0.6,<6.0.0)", "mypy (==0.910)", "pytest (>=4.4.0,<8.0.0)", "pytest-cov (>=2.10.0,<5.0.0)", "pytest-sugar (>=0.9.4,<0.10.0)", "pytest-xdist (>=1.32.0,<4.0.0)", "rich (>=10.11.0,<13.0.0)", "shellingham (>=1.3.0,<2.0.0)"] + +[[package]] +name = "typing-extensions" +version = "4.9.0" +description = "Backported and Experimental Type Hints for Python 3.8+" +optional = false +python-versions = ">=3.8" +files = [ + {file = "typing_extensions-4.9.0-py3-none-any.whl", hash = "sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd"}, + {file = "typing_extensions-4.9.0.tar.gz", hash = "sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783"}, +] + +[[package]] +name = "urllib3" +version = "2.1.0" +description = "HTTP library with thread-safe connection pooling, file post, and more." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "urllib3-2.1.0-py3-none-any.whl", hash = "sha256:55901e917a5896a349ff771be919f8bd99aff50b79fe58fec595eb37bbc56bb3"}, + {file = "urllib3-2.1.0.tar.gz", hash = "sha256:df7aa8afb0148fa78488e7899b2c59b5f4ffcfa82e6c54ccb9dd37c1d7b52d54"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "virtualenv" +version = "20.25.0" +description = "Virtual Python Environment builder" +optional = false +python-versions = ">=3.7" +files = [ + {file = "virtualenv-20.25.0-py3-none-any.whl", hash = "sha256:4238949c5ffe6876362d9c0180fc6c3a824a7b12b80604eeb8085f2ed7460de3"}, + {file = "virtualenv-20.25.0.tar.gz", hash = "sha256:bf51c0d9c7dd63ea8e44086fa1e4fb1093a31e963b86959257378aef020e1f1b"}, +] + +[package.dependencies] +distlib = ">=0.3.7,<1" +filelock = ">=3.12.2,<4" +platformdirs = ">=3.9.1,<5" + +[package.extras] +docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] +test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10)"] + +[metadata] +lock-version = "2.0" +python-versions = "^3.10" +content-hash = "269af20479971fc8a9dac3c187aacebf049cf687e91b4806a1b27c48e48fda8d" diff --git a/stac_model/pyproject.toml b/stac_model/pyproject.toml new file mode 100644 index 0000000..f73725b --- /dev/null +++ b/stac_model/pyproject.toml @@ -0,0 +1,201 @@ + +# Poetry pyproject.toml: https://python-poetry.org/docs/pyproject/ +[build-system] +requires = ["poetry_core>=1.0.0"] +build-backend = "poetry.core.masonry.api" + + +[tool.poetry] +name = 
"stac-model"
+version = "0.1.0"
+description = "A PydanticV2 validation and serialization library for the STAC ML Model Extension"
+readme = "README.md"
+authors = ["Ryan Avery "]
+license = "Apache Software License 2.0"
+repository = "https://github.com/rbavery/stac-model"
+homepage = "https://github.com/rbavery/stac-model"
+packages = [
+ {include = "stac_model"}
+]
+
+
+# Keywords description https://python-poetry.org/docs/pyproject/#keywords
+keywords = [] # UPDATEME with relevant keywords
+
+
+# Pypi classifiers: https://pypi.org/classifiers/
+classifiers = [ # UPDATEME with additional classifiers; remove last classifier to allow publishing on PyPI
+ "Development Status :: 3 - Alpha",
+ "Intended Audience :: Developers",
+ "Operating System :: OS Independent",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ "License :: OSI Approved :: Apache Software License",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.10",
+ "Programming Language :: Python :: 3.11",
+]
+
+
+[tool.poetry.scripts]
+# Entry points for the package https://python-poetry.org/docs/pyproject/#scripts
+"stac-model" = "stac_model.__main__:app"
+
+
+[tool.poetry.dependencies]
+python = "^3.10"
+
+typer = {extras = ["all"], version = "^0.7.0"}
+rich = "^12.6.0"
+pydantic = "~2.3.0" # bug in post 2.3 https://github.com/pydantic/pydantic/issues/7720
+pydantic-core = "~2"
+numpy = "^1.26.2"
+
+
+[tool.poetry.group.dev.dependencies]
+mypy = "^1.0.0"
+mypy-extensions = "^0.4.3"
+pre-commit = "^2.21.0"
+bandit = "^1.7.5"
+safety = "^2.3.4"
+
+
+pydocstyle = {extras = ["toml"], version = "^6.2.0"}
+pydoclint = "^0.3.0"
+
+pytest = "^7.2.1"
+pytest-html = "^3.2.0"
+pytest-cov = "^4.1.0"
+pytest-mock = "^3.10.0"
+pytest-timeout = "^2.2.0"
+pytest-benchmark = "^4.0.0"
+pytest-sugar = "^0.9.7"
+pytest-click = "^1.1.0"
+pytest-pikachu = "^1.0.0"
+coverage = "^7.3.0"
+ruff = "^0.1.7"
+
+[tool.ruff]
+exclude = [
+ ".git",
+ "__pycache__",
+ ".mypy_cache",
+ 
".tox", + ".venv", + "_build", + "buck-out", + "build", + "dist", + "env", + "venv" +] + + +[tool.mypy] +# https://github.com/python/mypy +# https://mypy.readthedocs.io/en/latest/config_file.html#using-a-pyproject-toml-file +python_version = "3.10" +pretty = true +show_traceback = true +color_output = true + +allow_redefinition = false +check_untyped_defs = true +disallow_any_generics = true +disallow_incomplete_defs = true +ignore_missing_imports = true +implicit_reexport = false +no_implicit_optional = true +show_column_numbers = true +show_error_codes = true +show_error_context = true +strict_equality = true +strict_optional = true +warn_no_return = true +warn_redundant_casts = true +warn_return_any = true +warn_unreachable = true +warn_unused_configs = true +warn_unused_ignores = true + +plugins = [ + "pydantic.mypy" +] + + +[tool.pydantic-mypy] +init_forbid_extra = true +init_typed = true +warn_required_dynamic_aliases = true + +[tool.pydocstyle] +# https://github.com/PyCQA/pydocstyle +# http://www.pydocstyle.org/en/stable/usage.html#available-options +convention = "google" + + +[tool.pydoclint] +# https://github.com/jsh9/pydoclint +# https://jsh9.github.io/pydoclint/how_to_config.html +style = "google" +exclude = ''' +/( + \.git + | \.hg + | \.mypy_cache + | \.tox + | \.venv + | __pycache__ + | _build + | buck-out + | build + | dist + | env + | venv +)/ +''' + + +[tool.pytest.ini_options] +# https://github.com/pytest-dev/pytest +# https://docs.pytest.org/en/6.2.x/customize.html#pyproject-toml +# Directories that are not visited by pytest collector: +norecursedirs =[ + "hooks", + "*.egg", + ".eggs", + "dist", + "build", + "docs", + ".tox", + ".git", + "__pycache__" +] +doctest_optionflags = ["NUMBER", "NORMALIZE_WHITESPACE", "IGNORE_EXCEPTION_DETAIL"] +timeout = 1000 + +# Extra options: +addopts = [ + "--strict-markers", + "--tb=short", + "--doctest-modules", + "--doctest-continue-on-failure", + "--pikachu" +] + + +[tool.coverage.run] +source = ["tests"] 
+branch = true + + +[tool.coverage.report] +exclude_also = [ + "def main", + "if __name__ == .__main__.:" +] +fail_under = 50 +show_missing = true + + +[tool.coverage.paths] +source = ["stac_model"] diff --git a/stac_model/requirements.txt b/stac_model/requirements.txt new file mode 100644 index 0000000..6613a77 --- /dev/null +++ b/stac_model/requirements.txt @@ -0,0 +1,12 @@ +annotated-types==0.6.0 ; python_version >= "3.10" and python_version < "4.0" +click==8.1.7 ; python_version >= "3.10" and python_version < "4.0" +colorama==0.4.6 ; python_version >= "3.10" and python_version < "4.0" +commonmark==0.9.1 ; python_version >= "3.10" and python_version < "4.0" +numpy==1.26.2 ; python_version >= "3.10" and python_version < "4.0" +pydantic-core==2.6.3 ; python_version >= "3.10" and python_version < "4.0" +pydantic==2.3.0 ; python_version >= "3.10" and python_version < "4.0" +pygments==2.17.2 ; python_version >= "3.10" and python_version < "4.0" +rich==12.6.0 ; python_version >= "3.10" and python_version < "4.0" +shellingham==1.5.4 ; python_version >= "3.10" and python_version < "4.0" +typer[all]==0.7.0 ; python_version >= "3.10" and python_version < "4.0" +typing-extensions==4.9.0 ; python_version >= "3.10" and python_version < "4.0" diff --git a/stac_model/stac_model/__init__.py b/stac_model/stac_model/__init__.py new file mode 100644 index 0000000..5ed00f2 --- /dev/null +++ b/stac_model/stac_model/__init__.py @@ -0,0 +1,8 @@ +"""A PydanticV2 validation and serialization library for the STAC ML Model Extension""" + +from importlib import metadata + +try: + __version__ = metadata.version("stac-model") +except metadata.PackageNotFoundError: + __version__ = "unknown" diff --git a/stac_model/stac_model/__main__.py b/stac_model/stac_model/__main__.py new file mode 100644 index 0000000..5032827 --- /dev/null +++ b/stac_model/stac_model/__main__.py @@ -0,0 +1,69 @@ +import typer +from rich.console import Console + +from stac_model import __version__ +from stac_model.schema 
import * + + +app = typer.Typer( + name="stac-model", + help="A PydanticV2 validation and serialization library for the STAC ML Model Extension", + add_completion=False, +) +console = Console() + + +def version_callback(print_version: bool) -> None: + """Print the version of the package.""" + if print_version: + console.print(f"[yellow]stac-model[/] version: [bold blue]{__version__}[/]") + raise typer.Exit() + + +@app.command(name="") +def main( + print_version: bool = typer.Option( + None, + "-v", + "--version", + callback=version_callback, + is_eager=True, + help="Prints the version of the stac-model package.", + ), +) -> None: + """Generate example spec.""" + + input_sig = TensorSignature( + name="input_tensor", dtype="float32", shape=(-1, 13, 64, 64) + ) + output_sig = TensorSignature(name="output_tensor", dtype="float32", shape=(-1, 10)) + model_sig = ModelSignature(inputs=[input_sig], outputs=[output_sig]) + model_artifact = ModelArtifact(path="s3://example/s3/uri/model.pt") + class_map = ClassMap( + class_to_label_id={ + "Annual Crop": 0, + "Forest": 1, + "Herbaceous Vegetation": 2, + "Highway": 3, + "Industrial Buildings": 4, + "Pasture": 5, + "Permanent Crop": 6, + "Residential Buildings": 7, + "River": 8, + "SeaLake": 9, + } + ) + meta = ModelMetadata( + name="eurosat", + class_map=class_map, + signatures=model_sig, + artifact=model_artifact, + ml_model_processor_type="cpu", + ) + json_str = meta.model_dump_json(indent=2) + with open("example.json", "w") as file: + file.write(json_str) + print(meta) + +if __name__ == "__main__": + app() diff --git a/stac_model/stac_model/schema.py b/stac_model/stac_model/schema.py new file mode 100644 index 0000000..e183bc7 --- /dev/null +++ b/stac_model/stac_model/schema.py @@ -0,0 +1,192 @@ +from pydantic import ( + BaseModel, + Field, + FilePath, + field_validator, + field_serializer, + AnyUrl, + ConfigDict, +) +from typing import Optional +import re +from typing import List, Tuple, Dict, Optional, Literal, Any +import 
numpy as np +from uuid import uuid4 +# import numpy.typing as npt + + +class TensorSignature(BaseModel): + """Tensor metadata, including the dtype (int8, float32, etc) and the tensor shape.""" + + name: Optional[str] = None + # TODO there's a couple of issues blocking numpy typing with + # pydantic or I'm not familiar enough with custom validators + # https://github.com/numpy/numpy/issues/25206 + # dtype: npt.DTypeLike = Field(...) + dtype: str = Field(...) + shape: Tuple[int, ...] | List[int] = Field(...) + model_config = ConfigDict(arbitrary_types_allowed=True) + + # TODO can't take numpy types for now until new pydant 2.6 + # # NotImplementedError: Cannot check isinstance when validating from json, use a JsonOrPython validator instead. + # @field_serializer('dtype') + # def serialize_ndtype(self, dtype: np.dtype) -> str: + # return dtype.name + # @field_validator('dtype', mode="before") + # @classmethod + # def convert_dtype(cls, v): + # if isinstance(v, str): + # v = np.dtype(v) + # elif not isinstance(v, np.dtype): + # raise ValueError(f'Expected np.dtype, received {type(v).__name__}') + # return v + + # @field_validator('shape') + # @classmethod + # def validate_shape(cls, v): + # if not isinstance(v, (tuple, list)): + # raise ValueError(f'Expected tuple or list for shape, received {type(v).__name__}') + # return list(v) + + +class ModelSignature(BaseModel): + """The name of the input tensor and accompanying tensor metadata.""" + + inputs: List[TensorSignature] + outputs: List[TensorSignature] + params: Optional[ + Dict[str, int | float | str] + ] = None # Or any other type that 'params' might take + + class Config: + arbitrary_types_allowed = True + + @property + def inputs_length(self) -> int: + return len(self.inputs) + + @property + def outputs_length(self) -> int: + return len(self.outputs) + + +class RuntimeConfig(BaseModel): + """TODO decide how to handle model runtime configurations. 
dependencies and hyperparams""" + + environment: str + + +class S3Path(AnyUrl): + allowed_schemes = {"s3"} + user_required = False + max_length = 1023 + min_length = 8 + + @field_validator("url") + @classmethod + def validate_s3_url(cls, v): + if not v.startswith("s3://"): + raise ValueError("S3 path must start with s3://") + if len(v) < cls.min_length: + raise ValueError("S3 path is too short") + if len(v) > cls.max_length: + raise ValueError("S3 path is too long") + return v + + @field_validator("host") + @classmethod + def validate_bucket_name(cls, v): + if not v: + raise ValueError("Bucket name cannot be empty") + if not 3 <= len(v) <= 63: + raise ValueError("Bucket name must be between 3 and 63 characters") + if not re.match(r"^[a-z0-9.\-]+$", v): + raise ValueError( + "Bucket name can only contain lowercase letters, numbers, dots, and hyphens" + ) + if v.startswith("-") or v.endswith("-"): + raise ValueError("Bucket name cannot start or end with a hyphen") + if ".." in v: + raise ValueError("Bucket name cannot have consecutive periods") + return v + + @field_validator("path") + @classmethod + def validate_key(cls, v): + if "//" in v: + raise ValueError("Key must not contain double slashes") + if "\\" in v: + raise ValueError("Backslashes are not standard in S3 paths") + if "\t" in v or "\n" in v: + raise ValueError("Key cannot contain tab or newline characters") + return v.strip("/") + + +class ModelArtifact(BaseModel): + """Information about the model location and other additional file locations.""" + + path: S3Path | FilePath | str = Field(...) 
+ additional_files: Optional[Dict[str, FilePath]] = None + + class Config: + arbitrary_types_allowed = True + + @field_validator("path") + @classmethod + def check_path_type(cls, v): + if isinstance(v, str): + if v.startswith("s3://"): + v = S3Path(url=v) + else: + v = FilePath(f=v) + else: + raise ValueError( + f"Expected str, S3Path, or FilePath input, received {type(v).__name__}" + ) + return v + + +class ClassMap(BaseModel): + class_to_label_id: Dict[str, int] + + # Property to reverse the mapping + @property + def label_id_to_class(self) -> Dict[int, str]: + # Reverse the mapping + return {v: k for k, v in self.class_to_label_id.items()} + + def get_class(self, class_id: int) -> str: + """Get class name from class id.""" + if class_id not in self.label_id_to_class: + raise ValueError(f"Class ID '{class_id}' not found") + return self.label_id_to_class[class_id] + + def get_label_id(self, class_name: str) -> int: + """Get class id from class name.""" + if class_name not in self.class_to_label_id: + raise ValueError(f"Class name '{class_name}' not found") + return self.class_to_label_id[class_name] + + +class ModelMetadata(BaseModel): + signatures: ModelSignature + artifact: ModelArtifact + id: str = Field(default_factory=lambda: uuid4().hex) + class_map: ClassMap + + # Runtime configurations required to run the model. + # TODO requirements.txt , conda.yml, or lock files for each should be supported in future. 
+ runtime_config: Optional[RuntimeConfig] = None + + # the name of the model + name: str + ml_model_type: Optional[str] = None + ml_model_processor_type: Optional[Literal["cpu", "gpu", "tpu", "mps"]] = None + ml_model_learning_approach: Optional[str] = None + ml_model_prediction_type: Optional[ + Literal["object-detection", "classification", "segmentation", "regression"] + ] = None + ml_model_architecture: Optional[str] = None + + class Config: + arbitrary_types_allowed = True diff --git a/stac_model/tests/test_schema.py b/stac_model/tests/test_schema.py new file mode 100644 index 0000000..b23bfdc --- /dev/null +++ b/stac_model/tests/test_schema.py @@ -0,0 +1,68 @@ +import pytest +from stac_model.schema import ( + TensorSignature, + ModelSignature, + ModelArtifact, + ClassMap, + ModelMetadata, +) +import os +import tempfile + + +def create_metadata(): + input_sig = TensorSignature( + name="input_tensor", dtype="float32", shape=(-1, 13, 64, 64) + ) + output_sig = TensorSignature(name="output_tensor", dtype="float32", shape=(-1, 10)) + model_sig = ModelSignature(inputs=[input_sig], outputs=[output_sig]) + model_artifact = ModelArtifact(path="s3://example/s3/uri/model.pt") + class_map = ClassMap( + class_to_label_id={ + "Annual Crop": 0, + "Forest": 1, + "Herbaceous Vegetation": 2, + "Highway": 3, + "Industrial Buildings": 4, + "Pasture": 5, + "Permanent Crop": 6, + "Residential Buildings": 7, + "River": 8, + "SeaLake": 9, + } + ) + return ModelMetadata( + name="eurosat", + class_map=class_map, + signatures=model_sig, + artifact=model_artifact, + ml_model_processor_type="cpu", + ) + + +@pytest.fixture +def metadata_json(): + model_metadata = create_metadata() + return model_metadata.model_dump_json(indent=2) + + +def test_model_metadata_json_operations(metadata_json): + # Use a temporary directory + with tempfile.TemporaryDirectory() as temp_dir: + temp_filepath = os.path.join(temp_dir, "tempfile.json") + + # Write to the file + with open(temp_filepath, "w") as file: 
+ file.write(metadata_json) + + # Read and validate the model metadata from the JSON file + with open(temp_filepath, "r") as json_file: + json_str = json_file.read() + model_metadata = ModelMetadata.model_validate_json(json_str) + + assert model_metadata.name == "eurosat" + + +def test_benchmark_model_metadata_validation(benchmark): + json_str = create_metadata().model_dump_json(indent=2) + benchmark(ModelMetadata.model_validate_json, json_str) From 0aa94fcbcc582cc951c7ca161032dde2383a7268 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Thu, 14 Dec 2023 17:05:21 -0800 Subject: [PATCH 002/112] refactor models and replace data object with common metadata band object --- README.md | 177 ++++++++++++++++++------------------------------------ 1 file changed, 59 insertions(+), 118 deletions(-) diff --git a/README.md b/README.md index bca7af2..b11f421 100644 --- a/README.md +++ b/README.md @@ -1,37 +1,27 @@ # Deep Learning Model Extension Specification -[![hackmd-github-sync-badge](https://hackmd.io/3fB1lrHSTcSHQS57UhVk-Q/badge)](https://hackmd.io/3fB1lrHSTcSHQS57UhVk-Q) +![hackmd-github-sync-badge](https://hackmd.io/3fB1lrHSTcSHQS57UhVk-Q/badge)](https://hackmd.io/3fB1lrHSTcSHQS57UhVk-Q) - **Title:** Deep Learning Model Extension -- **Identifier:** +- **Identifier:** [https://schemas.stacspec.org/v1.0.0-beta.3/extensions/dl-model/json-schema/schema.json](https://schemas.stacspec.org/v1.0.0-beta.3/extensions/dl-model/json-schema/schema.json) - **Field Name Prefix:** dlm - **Scope:** Item, Collection -- **Extension [Maturity Classification][stac-ext-maturity]:** Proposal -- **Owner**: - [@sfoucher](https://github.com/sfoucher) - [@fmigneault](https://github.com/fmigneault) - [@ymoisan](https://github.com/ymoisan) - -[stac-ext-maturity]: https://github.com/radiantearth/stac-spec/tree/master/extensions/README.md#extension-maturity - -This document explains the Template Extension to the [SpatioTemporal Asset Catalog][stac-spec] (STAC) specification. 
-This document explains the fields of the STAC Deep Learning Model (dlm) Extension to a STAC Item. -The main objective is to be able to build model collections that can be searched -and that contain enough information to be able to deploy an inference service. -When Deep Learning models are trained using satellite imagery, it is important -to track essential information if you want to make them searchable and reusable: +- **Extension Maturity Classification:** Proposal +- **Owner:** + - [@sfoucher](https://github.com/sfoucher) + - [@fmigneault](https://github.com/fmigneault) + - [@ymoisan](https://github.com/ymoisan) + +This document explains the Deep Learning Model (dlm) Extension to the [SpatioTemporal Asset Catalog (STAC)](https://github.com/radiantearth/stac-spec) specification. This document explains the fields of the STAC Deep Learning Model (dlm) Extension to a STAC Item. The main objective is to be able to build model collections that can be searched and that contain enough information to be able to deploy an inference service. When Deep Learning models are trained using satellite imagery, it is important to track essential information if you want to make them searchable and reusable: 1. Input data origin and specifications -2. Model base transforms +2. Model basic transforms: rescale and normalization 3. Model output and its semantic interpretation 4. Runtime environment to be able to run the model 5. Scientific references -[stac-spec]: https://github.com/radiantearth/stac-spec - -Check the original technical report -[here](https://github.com/crim-ca/CCCOT03/raw/main/CCCOT03_Rapport%20Final_FINAL_EN.pdf) for more details. +Check the original technical report [here](https://github.com/crim-ca/CCCOT03/raw/main/CCCOT03_Rapport%20Final_FINAL_EN.pdf) for more details. 
-![](https://i.imgur.com/cVAg5sA.png) +![Image Description](https://i.imgur.com/cVAg5sA.png) - Examples: - [Example with a UNet trained with thelper](examples/item.json) @@ -43,118 +33,71 @@ Check the original technical report | Field Name | Type | Description | |------------------|---------------------------------------------|------------------------------------------------------------------------| -| dlm:data | [Data Object](#data-object) | Describes the EO data compatible with the model. | -| dlm:inputs | [Inputs Object](#inputs-object) | Describes the transformation between the EO data and the model inputs. | +| bands | [Band Object](#bands) | Describes the EO data used to train or fine-tune the model. | +| dlm:input | [Input Object](#input-object) | Describes the transformation between the EO data and the model input. | | dlm:architecture | [Architecture Object](#architecture-object) | Describes the model architecture. | | dlm:runtime | [Runtime Object](#runtime-object) | Describes the runtime environments to run the model (inference). | -| dlm:outputs | [Outputs Object](#outputs-object) | Describes each model output and how to interpret it. | +| dlm:output | [Output Object](#output-object) | Describes each model output and how to interpret it. | In addition, fields from the following extensions must be imported in the item: - [Scientific Extension Specification][stac-ext-sci] to describe relevant publications. -- [EO Extension Specification][stac-ext-eo] to describe eo data. - [Version Extension Specification][stac-ext-ver] to define version tags. 
[stac-ext-sci]: https://github.com/radiantearth/stac-spec/tree/v1.0.0-beta.2/extensions/scientific/README.md -[stac-ext-eo]: https://github.com/radiantearth/stac-spec/tree/v1.0.0-beta.2/extensions/eo/README.md [stac-ext-ver]: https://github.com/radiantearth/stac-spec/tree/v1.0.0-beta.2/extensions/version/README.md -### Data Object - -| Field Name | Type | Description | -|-----------------|--------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------| -| process_level | [Process Level Enum](#process-level-enum) | Data processing level that represents the apparent variability of the data. | -| data_type | [Data Type Enum](#data-type-enum) | Data type (`uint8`, `uint16`, etc.) enum based on numpy base types for data normalization and pre-processing. | -| nodata | integer \| string | Value indicating *nodata*, which could require special data preparation by the network (see [No Data Value](#no-data-value)). | -| number_of_bands | integer | Number of bands used by the model | -| useful_bands | \[[Model Band Object](#model-band-object)] | Describes bands by index in the relevant order for the model input. | - -#### Process Level Enum - -It is recommended to use the [STAC Processing Extension][stac-ext-proc] -to represent the `processing:level` of the relevant level `L0` for raw data up to `L4` for Analysis-Ready Data (ARD). - -[stac-ext-proc]: https://github.com/stac-extensions/processing#suggested-processing-levels - -#### Data Type Enum - -It is recommended to use the [STAC Raster Extension - Raster Band Object][stac-ext-raster-band-obj] -in STAC Collections and Items that refer to a STAC Item using `dlm`'s `data_type`. The values should be one of the known -data types defined by `raster:bands`'s `data_type` as presented in [Data Types][stac-ext-raster-dtype]. 
- -If source imagery has different `data_type` values than the one specified by `dlm`'s `data_type` property, -this should provide -an indication that the source imagery might require a preprocessing step (scaling, normalization, conversion, etc.) -to adapt the samples to the relevant format expected by the described model. -[stac-ext-raster-dtype]: https://github.com/stac-extensions/raster/#data-types -[stac-ext-raster-band-obj]: https://github.com/stac-extensions/raster/#raster-band-object +### Bands -#### No Data Value +We use the STAC 1.1 Bands Object for representing bands information, including nodata value, data type, and common band names. Only bands used to train or fine tune the model should be included in this list. -It is recommended to use the [STAC Raster Extension - Raster Band Object](https://github.com/stac-extensions/raster/#raster-band-object) -in STAC Collections and Items that refer to a STAC Item using `dlm`'s `nodata`. This value should either map -to the `raster:bands`'s `nodata` property of relevant bands, or a classification label value representing -a "*background*" pixel mask (see [STAC Label Extension - Raster Label Notes][stac-ext-raster-label]) -from a `label:type` defined as `raster` with the relevant `raster` asset provided. +### Input Object -If source imagery has different `nodata` values than the one specified by `dlm`'s `nodata` property, this should provide -an indication that the source imagery might require a preprocessing step to adapt the samples to the values expected by -the described model. +| Field Name | Type | Description | +|-------------------------|---------------------------------|---------------------------------------------------------------------------------------------------------------------------------| +| name | string | Python name of the input variable. | +| input_tensors | [Tensor Object](#tensor-object) | Shape of the input tensor ($N \times C \times H \times W$). 
| +| scaling_factor | number | Scaling factor to apply to get data within `[0,1]`. For instance `scaling_factor=0.004` for 8-bit data. | +| mean | list of numbers | Mean vector value to be removed from the data if norm_type uses mean. The vector size must be consistent with `input_tensors:dim`. | +| std | list of numbers | Standard deviation values used to normalize the data if norm type uses standard deviation. The vector size must be consistent with `input_tensors:dim`. | +| band_names | list of common metadata band names | Specifies the ordering of the bands selected from the bands list described in [bands](#Bands). | -[stac-ext-raster-label]: https://github.com/stac-extensions/label#raster-label-notes - -#### Model Band Object - -Can be combined with `eo:bands`'s [`Band Object`][stac-ext-eo-band-obj]. - -[stac-ext-eo-band-obj]: https://github.com/stac-extensions/eo#band-object - -| Field Name | Type | Description | -|-----------------|---------|------------------------------------------| -| index | integer | **REQUIRED** Index of the spectral band. | -| name | string | Short name of the band for convenience. | - -### Inputs Object - -| Field Name | Type | Description | -|-------------------------|---------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------| -| name | string | Python name of the input variable. | -| input_tensors | [Tensor Object](#tensor-object) | Shape of the input tensor ($N \times C \times H \times W$). | -| scaling_factor | number | Scaling factor to apply to get data within `[0,1]`. For instance `scaling_factor=0.004` for 8-bit data. | -| normalization:mean | list of numbers | Mean vector value to be removed from the data. The vector size must be consistent with `input_tensors:dim` and `selected_bands`. | -| normalization:std | list of numbers | Standard-deviation values used to normalize the data. 
The vector size must be consistent with `input_tensors:dim` and `selected_bands`. | -| selected_band | list of integers | Specifies the bands selected from the data described in dlm:data. | -| pre_processing_function | string | Defines a python pre-processing function (path and inputs should be specified). | #### Tensor Object | Field Name | Type | Description | |------------|--------|-------------------------------------| | batch | number | Batch size dimension (must be > 0). | -| dim | number | Number of channels (must be > 0). | +| time | number | Number of timesteps (must be > 0). | +| channels | number | Number of channels (must be > 0). | | height | number | Height of the tensor (must be > 0). | | width | number | Width of the tensor (must be > 0). | + ### Architecture Object | Field Name | Type | Description | |-------------------------|---------|-------------------------------------------------------------| -| total_nb_parameters | integer | Total number of parameters. | -| estimated_total_size_mb | number | The equivalent memory size in MB. | +| total_parameters | integer | Total number of parameters. | +| on_disk_size_mb | number | The equivalent memory size on disk in MB. | +| ram_size_mb | number | The equivalent memory size in memory in MB. | | type | string | Type of network (ex: ResNet-18). | | summary | string | Summary of the layers, can be the output of `print(model)`. | | pretrained | string | Indicates the source of the pretraining (ex: ImageNet). | ### Runtime Object -| Field Name | Type | Description | -|-------------------|------------------------------------|------------------------------------------------------------------------------| -| framework | string | Used framework (ex: PyTorch, TensorFlow). | -| version | string | Framework version (some models require a specific version of the framework). | -| model_handler | string | Inference execution function. | -| model_src_url | string | Url of the source code (ex: GitHub repo). 
| -| model_commit_hash | string | Hash value pointing to a specific version of the code. | -| docker | \[[Docker Object](#docker-object)] | Information for the deployment of the model in a docker instance. | +| Field Name | Type | Description | +|-----------------------|------------------------------------|------------------------------------------------------------------------------------------| +| framework | string | Used framework (ex: PyTorch, TensorFlow). | +| version | string | Framework version (some models require a specific version of the framework). | +| model_artifact | string | Blob storage URI, POSIX filepath in docker image, or other URI type to the model file. | +| model_handler | string | Inference execution function. | +| model_src_url | string | Url of the source code (ex: GitHub repo). | +| model_commit_hash | string | Hash value pointing to a specific version of the code. | +| docker | \[[Docker Object](#docker-object)] | Information for the deployment of the model in a docker instance. | +| batch_size_suggestion | number | A suggested batch size for a given compute instance type | +| instance_suggestion | string | A suggested compute instance type to use with the suggested batch size. | #### Docker Object @@ -165,18 +108,17 @@ Can be combined with `eo:bands`'s [`Band Object`][stac-ext-eo-band-obj]. | tag | string | Tag of the image. | | working_dir | string | Working directory in the instance that can be mapped. | | run | string | Running command. | -| gpu | boolean | True if the docker image requires a GPU. | +| accelerator | boolean | True if the docker image requires a custom accelerator (CPU,TPU,MPS). 
| + +### Output Object -### Outputs Object +| Field Name | Type | Description | +|--------------------------|-------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| task | [Task Enum](#task-enum) | Specifies the Machine Learning task for which the output can be used for. | +| number_of_classes | integer | Number of classes. | +| final_layer_size | \[integer] | Sizes of the output tensor as ($N \times C \times H \times W$). | +| class_name_mapping | list | Mapping of the output index to a short class name, for each record we specify the index and the class name. | -| Field Name | Type | Description | -|--------------------------|-------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| task | [Task Enum](#task-enum) | Specifies the Machine Learning task for which the output can be used for. | -| number_of_classes | integer | Number of classes. | -| final_layer_size | \[integer] | Sizes of the output tensor as ($N \times C \times H \times W$). | -| class_name_mapping | list | Mapping of the output index to a short class name, for each record we specify the index and the class name. | -| dont_care_index | integer | Some models are using a *do not care* value which is ignored in the input data. This is an optional parameter. | -| post_processing_function | string | Some models are using a complex post-processing that can be specified using a post processing function. The python package should be specified as well as the input and outputs type. 
For example:`my_python_module_name:my_processing_function(Tensor) -> Tensor` | #### Task Enum @@ -184,12 +126,15 @@ It is recommended to define `task` with one of the following values: - `regression` - `classification` - `object detection` -- `segmentation` (generic) - `semantic segmentation` - `instance segmentation` - `panoptic segmentation` +- `multi-modal` +- `similarity search` +- `image captioning` +- `generative` -This should align with the `label:tasks` values defined in [STAC Label Extension][stac-ext-label-props] for relevant +If the task falls within supervised machine learning and uses labels during training, this should align with the `label:tasks` values defined in [STAC Label Extension][stac-ext-label-props] for relevant STAC Collections and Items employed with the model described by this extension. [stac-ext-label-props]: https://github.com/stac-extensions/label#item-properties @@ -216,14 +161,10 @@ for running tests are copied here for convenience. ### Running tests -The same checks that run as checks on PRs are part of the repository and can be run locally to verify -that changes are valid. -To run tests locally, you'll need `npm`, which is a standard part of any [node.js][nodejs] installation. +The same checks that run as checks on PRs are part of the repository and can be run locally to verify that changes are valid. To run tests locally, you'll need `npm`, which is a standard part of any [node.js](https://nodejs.org/en/download/) installation. -[nodejs]: https://nodejs.org/en/download/ +First, install everything with npm once. Navigate to the root of this repository and on your command line run: -First you'll need to install everything with npm once. 
Just navigate to the root of this repository and on -your command line run: ```bash npm install ``` From d67ca110dcbbca0c45658d3167b32cf419d5d483 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Fri, 15 Dec 2023 16:57:26 -0800 Subject: [PATCH 003/112] basic pydantic models for refactored model extension --- CHANGELOG.md | 27 +++++ README.md | 29 +++-- stac_model/stac_model/main.py | 121 +++++++++++++++++++ stac_model/stac_model/paths.py | 44 +++++++ stac_model/stac_model/runtime.py | 25 ++++ stac_model/stac_model/schema.py | 192 ------------------------------- 6 files changed, 235 insertions(+), 203 deletions(-) create mode 100644 stac_model/stac_model/main.py create mode 100644 stac_model/stac_model/paths.py create mode 100644 stac_model/stac_model/runtime.py delete mode 100644 stac_model/stac_model/schema.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 880efdc..b1471da 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,33 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +### Added +- more Task Enum tasks +- accelerator options +- batch_size and hardware suggestion +- ram_size_mb to specify model ram requirements during inference +- added time to the Tensor object as an optional dim + +### Changed +- selected_bands > band_names, the same human readable names used in the common metadata band objects. +- replaced normalization:mean, etc. with statistics from STAC 1.1 common metadata +- added pydantic models for internal schema objects + +[raster-band-object]: https://github.com/stac-extensions/raster/#raster-band-object + +### Deprecated +- Specifying `class_name_mapping` by array is deprecated. + Direct mapping as an object of index to class name should be used. + For backward compatibility, mapping as array and using nested objects with `index` and `class_name` properties + is still permitted, although overly verbose compared to the direct mapping. 
+ +### Removed +- Data Object, replaced with common metadata band object which also records data_type and nodata type + +# TODO link release + +## [Unreleased] + ### Added - Added example model architecture summary text. diff --git a/README.md b/README.md index b11f421..69ebd45 100644 --- a/README.md +++ b/README.md @@ -49,19 +49,24 @@ In addition, fields from the following extensions must be imported in the item: ### Bands -We use the STAC 1.1 Bands Object for representing bands information, including nodata value, data type, and common band names. Only bands used to train or fine tune the model should be included in this list. +We use the [STAC 1.1 Bands Object](https://github.com/radiantearth/stac-spec/pull/1254) for representing bands information, including nodata value, data type, and common band names. Only bands used to train or fine tune the model should be included in this list. -### Input Object +A deviation is that we do not include the [Statistics](https://github.com/radiantearth/stac-spec/pull/1254/files#diff-2477b726f8c5d5d1c8b391be056db325e6918e78a24b414ccd757c7fbd574079R294) object at the band object level, but at the Model Input level. This is because in machine learning, we typically only need statistics for the dataset used to train the model in order to normalize any given bands input. + +### Model Input | Field Name | Type | Description | |-------------------------|---------------------------------|---------------------------------------------------------------------------------------------------------------------------------| | name | string | Python name of the input variable. | +| band_names | list of common metadata band names | Specifies the ordering of the bands selected from the bands list described in [bands](#Bands). | | input_tensors | [Tensor Object](#tensor-object) | Shape of the input tensor ($N \times C \times H \times W$). | +| params | dict | dictionary with names for the parameters and their values. 
some models may take scalars or other non-tensor inputs. | | scaling_factor | number | Scaling factor to apply to get data within `[0,1]`. For instance `scaling_factor=0.004` for 8-bit data. | -| mean | list of numbers | Mean vector value to be removed from the data if norm_type uses mean. The vector size must be consistent with `input_tensors:dim`. | -| std | list of numbers | Standard deviation values used to normalize the data if norm type uses standard deviation. The vector size must be consistent with `input_tensors:dim`. | -| band_names | list of common metadata band names | Specifies the ordering of the bands selected from the bands list described in [bands](#Bands). | - +| norm_by_channel | string | Whether to normalize each channel by channel-wise statistics or to normalize by dataset statistics. "True" or "False" | +| norm_type | string | Normalization method. Select one option from "min_max", "z_score", "max_norm", "mean_norm", "unit_variance", "none" | +| rescale_type | string | High level descriptor of the rescaling method to change image shape. Select one option from "crop", "pad", "interpolation", "none". If your rescaling method combines more than one of these operations, provide the name of the operation instead| +| statistics | [Statistics Object](https://github.com/radiantearth/stac-spec/pull/1254/files#diff-2477b726f8c5d5d1c8b391be056db325e6918e78a24b414ccd757c7fbd574079R294) | Dataset statistics for the training dataset used to normalize the inputs. | +| pre_processing_function | string | A url to the preprocessing function where normalization and rescaling takes place, and any other significant operations. Or, instead, the function code path, for example: my_python_module_name:my_processing_function| #### Tensor Object @@ -72,6 +77,7 @@ We use the STAC 1.1 Bands Object for representing bands information, including n | channels | number | Number of channels (must be > 0). | | height | number | Height of the tensor (must be > 0). 
| | width | number | Width of the tensor (must be > 0). | +|dim_order | string | How the above dimensions are ordered with the tensor. "bhw", "bchw", "bthw", "btchw", "bcthw" | ### Architecture Object @@ -79,9 +85,9 @@ We use the STAC 1.1 Bands Object for representing bands information, including n | Field Name | Type | Description | |-------------------------|---------|-------------------------------------------------------------| | total_parameters | integer | Total number of parameters. | -| on_disk_size_mb | number | The equivalent memory size on disk in MB. | -| ram_size_mb | number | number | The equivalent memory size in memory in MB. | -| type | string | Type of network (ex: ResNet-18). | +| on_disk_size_mb | number | The memory size on disk of the model artifact (MB). | +| ram_size_mb | number | number | The memory size in accelerator memory during inference (MB).| +| model_type | string | Type of network (ex: ResNet-18). | | summary | string | Summary of the layers, can be the output of `print(model)`. | | pretrained | string | Indicates the source of the pretraining (ex: ImageNet). | @@ -97,7 +103,7 @@ We use the STAC 1.1 Bands Object for representing bands information, including n | model_commit_hash | string | Hash value pointing to a specific version of the code. | | docker | \[[Docker Object](#docker-object)] | Information for the deployment of the model in a docker instance. | | batch_size_suggestion | number | A suggested batch size for a given compute instance type | -| instance_suggestion | str +| hardware_suggestion | str | A suggested cloud instance type or accelerator model | #### Docker Object @@ -117,7 +123,8 @@ We use the STAC 1.1 Bands Object for representing bands information, including n | task | [Task Enum](#task-enum) | Specifies the Machine Learning task for which the output can be used for. | | number_of_classes | integer | Number of classes. 
| | final_layer_size | \[integer] | Sizes of the output tensor as ($N \times C \times H \times W$). | -| class_name_mapping | list | Mapping of the output index to a short class name, for each record we specify the index and the class name. | +| class_name_mapping | dict | Mapping of the output index to a short class name, for each record we specify the index and the class name. | +| post_processing_function | string | A url to the postprocessing function where normalization and rescaling takes place, and any other significant operations. Or, instead, the function code path, for example: my_python_module_name:my_processing_function| #### Task Enum diff --git a/stac_model/stac_model/main.py b/stac_model/stac_model/main.py new file mode 100644 index 0000000..eb01278 --- /dev/null +++ b/stac_model/stac_model/main.py @@ -0,0 +1,121 @@ +from pydantic import BaseModel, Field, AnyUrl +from typing import List, Optional, Literal, Any, List, Tuple, Dict, Optional, Literal +from pydantic import ( + BaseModel, + Field, + AnyUrl +) +from enum import Enum +from .runtime import ModelArtifact + +class Band(BaseModel): + name: str + description: str + nodata: float | int | str + data_type: str + unit: Optional[str] + +class TensorObject(BaseModel): + batch: int = Field(..., gt=0) + time: Optional[int] = Field(..., gt=0) + channels: Optional[int] = Field(..., gt=0) + height: int = Field(..., gt=0) + width: int = Field(..., gt=0) + dim_order: Literal["bhw", "bchw", "bthw", "btchw", "bcthw"] + +class Statistics(BaseModel): + minimum: List[float | int] + maximum: List[float | int] + mean: List[float] + stddev: List[float] + count: List[int] + valid_percent: List[float] + +class ModelInput(BaseModel): + name: str + band_names: List[str] + input_tensors: TensorObject + params: Optional[ + Dict[str, int | float | str] + ] = None + scaling_factor: float + norm_by_channel: str + norm_type: Literal["min_max", "z_score", "max_norm", "mean_norm", "unit_variance", "none"] + rescale_type: 
Literal["crop", "pad", "interpolation", "none"] + statistics: Optional[Statistics] + pre_processing_function: str | AnyUrl + +class ArchitectureObject(BaseModel): + total_parameters: int + on_disk_size_mb: float + ram_size_mb: float + model_type: str + summary: str + pretrained: str + + +class DockerObject(BaseModel): + docker_file: str + image_name: str + tag: str + working_dir: str + run: str + accelerator: bool + +class RuntimeObject(BaseModel): + framework: str + version: str + model_artifact: ModelArtifact + model_handler: str + model_src_url: str + model_commit_hash: str + docker: List[DockerObject] + batch_size_suggestion: int + hardware_suggestion: str | AnyUrl + +class TaskEnum(str, Enum): + regression = "regression" + classification = "classification" + object_detection = "object detection" + semantic_segmentation = "semantic segmentation" + instance_segmentation = "instance segmentation" + panoptic_segmentation = "panoptic segmentation" + multi_modal = "multi-modal" + similarity_search = "similarity search" + image_captioning = "image captioning" + generative = "generative" + +class ClassMap(BaseModel): + class_to_label_id: Dict[str, int] + + # Property to reverse the mapping + @property + def label_id_to_class(self) -> Dict[int, str]: + # Reverse the mapping + return {v: k for k, v in self.class_to_label_id.items()} + + def get_class(self, class_id: int) -> str: + """Get class name from class id.""" + if class_id not in self.label_id_to_class: + raise ValueError(f"Class ID '{class_id}' not found") + return self.label_id_to_class[class_id] + + def get_label_id(self, class_name: str) -> int: + """Get class id from class name.""" + if class_name not in self.class_to_label_id: + raise ValueError(f"Class name '{class_name}' not found") + return self.class_to_label_id[class_name] + +class OutputObject(BaseModel): + task: TaskEnum + number_of_classes: int + final_layer_size: List[int] + class_name_mapping: ClassMap + post_processing_function: str + +class 
DeepLearningModelExtension(BaseModel): + bands: List[Band] + dlm_input: ModelInput + dlm_architecture: ArchitectureObject + dlm_runtime: RuntimeObject + dlm_output: OutputObject diff --git a/stac_model/stac_model/paths.py b/stac_model/stac_model/paths.py new file mode 100644 index 0000000..30a5fa8 --- /dev/null +++ b/stac_model/stac_model/paths.py @@ -0,0 +1,44 @@ +class S3Path(AnyUrl): + allowed_schemes = {"s3"} + user_required = False + max_length = 1023 + min_length = 8 + + @field_validator("url") + @classmethod + def validate_s3_url(cls, v): + if not v.startswith("s3://"): + raise ValueError("S3 path must start with s3://") + if len(v) < cls.min_length: + raise ValueError("S3 path is too short") + if len(v) > cls.max_length: + raise ValueError("S3 path is too long") + return v + + @field_validator("host") + @classmethod + def validate_bucket_name(cls, v): + if not v: + raise ValueError("Bucket name cannot be empty") + if not 3 <= len(v) <= 63: + raise ValueError("Bucket name must be between 3 and 63 characters") + if not re.match(r"^[a-z0-9.\-]+$", v): + raise ValueError( + "Bucket name can only contain lowercase letters, numbers, dots, and hyphens" + ) + if v.startswith("-") or v.endswith("-"): + raise ValueError("Bucket name cannot start or end with a hyphen") + if ".." 
in v: + raise ValueError("Bucket name cannot have consecutive periods") + return v + + @field_validator("path") + @classmethod + def validate_key(cls, v): + if "//" in v: + raise ValueError("Key must not contain double slashes") + if "\\" in v: + raise ValueError("Backslashes are not standard in S3 paths") + if "\t" in v or "\n" in v: + raise ValueError("Key cannot contain tab or newline characters") + return v.strip("/") diff --git a/stac_model/stac_model/runtime.py b/stac_model/stac_model/runtime.py new file mode 100644 index 0000000..6c1f830 --- /dev/null +++ b/stac_model/stac_model/runtime.py @@ -0,0 +1,25 @@ +from .paths import S3Path +from pydantic import BaseModel, Field, FilePath, field_validator +from typing import Optional, Dict +class ModelArtifact(BaseModel): + """Information about the model location and other additional file locations.""" + + path: S3Path | FilePath | str = Field(...) + additional_files: Optional[Dict[str, FilePath]] = None + + class Config: + arbitrary_types_allowed = True + + @field_validator("path") + @classmethod + def check_path_type(cls, v): + if isinstance(v, str): + if v.startswith("s3://"): + v = S3Path(url=v) + else: + v = FilePath(f=v) + else: + raise ValueError( + f"Expected str, S3Path, or FilePath input, received {type(v).__name__}" + ) + return v diff --git a/stac_model/stac_model/schema.py b/stac_model/stac_model/schema.py deleted file mode 100644 index e183bc7..0000000 --- a/stac_model/stac_model/schema.py +++ /dev/null @@ -1,192 +0,0 @@ -from pydantic import ( - BaseModel, - Field, - FilePath, - field_validator, - field_serializer, - AnyUrl, - ConfigDict, -) -from typing import Optional -import re -from typing import List, Tuple, Dict, Optional, Literal, Any -import numpy as np -from uuid import uuid4 -# import numpy.typing as npt - - -class TensorSignature(BaseModel): - """Tensor metadata, including the dtype (int8, float32, etc) and the tensor shape.""" - - name: Optional[str] = None - # TODO there's a couple of 
issues blocking numpy typing with - # pydantic or I'm not familiar enough with custom validators - # https://github.com/numpy/numpy/issues/25206 - # dtype: npt.DTypeLike = Field(...) - dtype: str = Field(...) - shape: Tuple[int, ...] | List[int] = Field(...) - model_config = ConfigDict(arbitrary_types_allowed=True) - - # TODO can't take numpy types for now until new pydant 2.6 - # # NotImplementedError: Cannot check isinstance when validating from json, use a JsonOrPython validator instead. - # @field_serializer('dtype') - # def serialize_ndtype(self, dtype: np.dtype) -> str: - # return dtype.name - # @field_validator('dtype', mode="before") - # @classmethod - # def convert_dtype(cls, v): - # if isinstance(v, str): - # v = np.dtype(v) - # elif not isinstance(v, np.dtype): - # raise ValueError(f'Expected np.dtype, received {type(v).__name__}') - # return v - - # @field_validator('shape') - # @classmethod - # def validate_shape(cls, v): - # if not isinstance(v, (tuple, list)): - # raise ValueError(f'Expected tuple or list for shape, received {type(v).__name__}') - # return list(v) - - -class ModelSignature(BaseModel): - """The name of the input tensor and accompanying tensor metadata.""" - - inputs: List[TensorSignature] - outputs: List[TensorSignature] - params: Optional[ - Dict[str, int | float | str] - ] = None # Or any other type that 'params' might take - - class Config: - arbitrary_types_allowed = True - - @property - def inputs_length(self) -> int: - return len(self.inputs) - - @property - def outputs_length(self) -> int: - return len(self.outputs) - - -class RuntimeConfig(BaseModel): - """TODO decide how to handle model runtime configurations. 
dependencies and hyperparams""" - - environment: str - - -class S3Path(AnyUrl): - allowed_schemes = {"s3"} - user_required = False - max_length = 1023 - min_length = 8 - - @field_validator("url") - @classmethod - def validate_s3_url(cls, v): - if not v.startswith("s3://"): - raise ValueError("S3 path must start with s3://") - if len(v) < cls.min_length: - raise ValueError("S3 path is too short") - if len(v) > cls.max_length: - raise ValueError("S3 path is too long") - return v - - @field_validator("host") - @classmethod - def validate_bucket_name(cls, v): - if not v: - raise ValueError("Bucket name cannot be empty") - if not 3 <= len(v) <= 63: - raise ValueError("Bucket name must be between 3 and 63 characters") - if not re.match(r"^[a-z0-9.\-]+$", v): - raise ValueError( - "Bucket name can only contain lowercase letters, numbers, dots, and hyphens" - ) - if v.startswith("-") or v.endswith("-"): - raise ValueError("Bucket name cannot start or end with a hyphen") - if ".." in v: - raise ValueError("Bucket name cannot have consecutive periods") - return v - - @field_validator("path") - @classmethod - def validate_key(cls, v): - if "//" in v: - raise ValueError("Key must not contain double slashes") - if "\\" in v: - raise ValueError("Backslashes are not standard in S3 paths") - if "\t" in v or "\n" in v: - raise ValueError("Key cannot contain tab or newline characters") - return v.strip("/") - - -class ModelArtifact(BaseModel): - """Information about the model location and other additional file locations.""" - - path: S3Path | FilePath | str = Field(...) 
- additional_files: Optional[Dict[str, FilePath]] = None - - class Config: - arbitrary_types_allowed = True - - @field_validator("path") - @classmethod - def check_path_type(cls, v): - if isinstance(v, str): - if v.startswith("s3://"): - v = S3Path(url=v) - else: - v = FilePath(f=v) - else: - raise ValueError( - f"Expected str, S3Path, or FilePath input, received {type(v).__name__}" - ) - return v - - -class ClassMap(BaseModel): - class_to_label_id: Dict[str, int] - - # Property to reverse the mapping - @property - def label_id_to_class(self) -> Dict[int, str]: - # Reverse the mapping - return {v: k for k, v in self.class_to_label_id.items()} - - def get_class(self, class_id: int) -> str: - """Get class name from class id.""" - if class_id not in self.label_id_to_class: - raise ValueError(f"Class ID '{class_id}' not found") - return self.label_id_to_class[class_id] - - def get_label_id(self, class_name: str) -> int: - """Get class id from class name.""" - if class_name not in self.class_to_label_id: - raise ValueError(f"Class name '{class_name}' not found") - return self.class_to_label_id[class_name] - - -class ModelMetadata(BaseModel): - signatures: ModelSignature - artifact: ModelArtifact - id: str = Field(default_factory=lambda: uuid4().hex) - class_map: ClassMap - - # Runtime configurations required to run the model. - # TODO requirements.txt , conda.yml, or lock files for each should be supported in future. 
- runtime_config: Optional[RuntimeConfig] = None - - # the name of the model - name: str - ml_model_type: Optional[str] = None - ml_model_processor_type: Optional[Literal["cpu", "gpu", "tpu", "mps"]] = None - ml_model_learning_approach: Optional[str] = None - ml_model_prediction_type: Optional[ - Literal["object-detection", "classification", "segmentation", "regression"] - ] = None - ml_model_architecture: Optional[str] = None - - class Config: - arbitrary_types_allowed = True From 195a07b5bfa4e0a0843b2e4d5f7ba51603a86f3b Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Fri, 5 Jan 2024 10:50:53 -0800 Subject: [PATCH 004/112] readme updates --- stac_model/README.md | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/stac_model/README.md b/stac_model/README.md index 1f37fdd..91439b9 100644 --- a/stac_model/README.md +++ b/stac_model/README.md @@ -112,15 +112,13 @@ Currently this looks like You can see the list of available releases on the [GitHub Releases][r1] page. - +## :page_facing_up: License [![License][blic1]][blic2] -This project is licenced under the terms of the `Apache Software License 2.0` licence. See [LICENCE][blic2] for more details. - - -## Credits [![Python project templated from galactipy.][bp6]][bp7] +This project is licensed under the terms of the `Apache Software License 2.0` license. See [LICENSE][blic2] for more details. 
+## :heartpulse: Credits +[![Python project templated from galactipy.][bp6]][bp7] From 9b3ad07ae7d12e939fe846113dfc54d57561a55e Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Fri, 5 Jan 2024 15:29:32 -0800 Subject: [PATCH 005/112] start filling out base models with schema described in README and main example --- CHANGELOG.md | 2 +- README.md | 71 +++++++++--------- stac_model/stac_model/__main__.py | 16 ++-- stac_model/stac_model/input.py | 39 ++++++++++ stac_model/stac_model/main.py | 121 ------------------------------ stac_model/stac_model/output.py | 30 ++++++++ stac_model/stac_model/runtime.py | 39 ++++++++-- stac_model/stac_model/schema.py | 20 +++++ 8 files changed, 168 insertions(+), 170 deletions(-) create mode 100644 stac_model/stac_model/input.py delete mode 100644 stac_model/stac_model/main.py create mode 100644 stac_model/stac_model/output.py create mode 100644 stac_model/stac_model/schema.py diff --git a/CHANGELOG.md b/CHANGELOG.md index b1471da..664fd18 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -29,7 +29,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Removed - Data Object, replaced with common metadata band object which also records data_type and nodata type -# TODO link release +# TODO link release here ## [Unreleased] diff --git a/README.md b/README.md index 69ebd45..45e133c 100644 --- a/README.md +++ b/README.md @@ -1,30 +1,31 @@ -# Deep Learning Model Extension Specification +# ML Model Extension Specification -![hackmd-github-sync-badge](https://hackmd.io/3fB1lrHSTcSHQS57UhVk-Q/badge)](https://hackmd.io/3fB1lrHSTcSHQS57UhVk-Q) +[![hackmd-github-sync-badge](https://hackmd.io/3fB1lrHSTcSHQS57UhVk-Q/badge)](https://hackmd.io/3fB1lrHSTcSHQS57UhVk-Q) -- **Title:** Deep Learning Model Extension -- **Identifier:** [https://schemas.stacspec.org/v1.0.0-beta.3/extensions/dl-model/json-schema/schema.json](https://schemas.stacspec.org/v1.0.0-beta.3/extensions/dl-model/json-schema/schema.json) -- **Field Name 
Prefix:** dlm +- **Title:** Model Extension +- **Identifier:** [https://schemas.stacspec.org/v1.0.0-beta.3/extensions/ml-model/json-schema/schema.json](https://schemas.stacspec.org/v1.0.0-beta.3/extensions/ml-model/json-schema/schema.json) +- **Field Name Prefix:** mlm - **Scope:** Item, Collection - **Extension Maturity Classification:** Proposal - **Owner:** - [@sfoucher](https://github.com/sfoucher) - [@fmigneault](https://github.com/fmigneault) - [@ymoisan](https://github.com/ymoisan) + - [@rbavery](https://github.com/rbavery) -This document explains the Template Extension to the [SpatioTemporal Asset Catalog (STAC)](https://github.com/radiantearth/stac-spec) specification. This document explains the fields of the STAC Deep Learning Model (dlm) Extension to a STAC Item. The main objective is to be able to build model collections that can be searched and that contain enough information to be able to deploy an inference service. When Deep Learning models are trained using satellite imagery, it is important to track essential information if you want to make them searchable and reusable: -1. Input data origin and specifications -2. Model basic transforms: rescale and normalization -3. Model output and its semantic interpretation -4. Runtime environment to be able to run the model +This document explains the Model Extension to the [SpatioTemporal Asset Catalog (STAC)](https://github.com/radiantearth/stac-spec) specification. This document explains the fields of the STAC Model Extension to a STAC Item. The main objective of the extension is two-fold: 1) to enable building model collections that can be searched alongside associated STAC datasets and 2) to record all necessary parameters, artifact locations, and high level processing steps to deploy an inference service. Specifically, this extension records the following info to make ML models searchable and reusable: 1. Sensor band specifications 2. 
The two fundamental transforms on model inputs: rescale and normalization +3. Model output shape, data type, and its semantic interpretation +4. An optional, flexible description of the runtime environment to be able to run the model 5. Scientific references -Check the original technical report [here](https://github.com/crim-ca/CCCOT03/raw/main/CCCOT03_Rapport%20Final_FINAL_EN.pdf) for more details. +Check the original technical report for an earlier version of the Model Extension [here](https://github.com/crim-ca/CCCOT03/raw/main/CCCOT03_Rapport%20Final_FINAL_EN.pdf) for more details. ![Image Description](https://i.imgur.com/cVAg5sA.png) - Examples: - - [Example with a UNet trained with thelper](examples/item.json) + - [Example with a ??? trained with torchgeo](examples/item.json) TODO update example - [Collection example](examples/collection.json): Shows the basic usage of the extension in a STAC Collection - [JSON Schema](json-schema/schema.json) - [Changelog](./CHANGELOG.md) @@ -33,11 +34,10 @@ Check the original technical report [here](https://github.com/crim-ca/CCCOT03/ra | Field Name | Type | Description | |------------------|---------------------------------------------|------------------------------------------------------------------------| -| bands | [Band Object](#bands) | Describes the EO data used to train or fine-tune the model. | -| dlm:input | [Input Object](#input-object) | Describes the transformation between the EO data and the model input. | -| dlm:architecture | [Architecture Object](#architecture-object) | Describes the model architecture. | -| dlm:runtime | [Runtime Object](#runtime-object) | Describes the runtime environments to run the model (inference). | -| dlm:output | [Output Object](#output-object) | Describes each model output and how to interpret it. | +| dlm:input | [ModelInput](#model-input) | Describes the transformation between the EO data and the model input. 
| +| dlm:architecture | [Architecture](#architecture) | Describes the model architecture. | +| dlm:runtime | [Runtime](#runtime) | Describes the runtime environments to run the model (inference). | +| dlm:output | [ModelOutput](#model-output) | Describes each model output and how to interpret it. | In addition, fields from the following extensions must be imported in the item: - [Scientific Extension Specification][stac-ext-sci] to describe relevant publications. @@ -47,29 +47,28 @@ In addition, fields from the following extensions must be imported in the item: [stac-ext-ver]: https://github.com/radiantearth/stac-spec/tree/v1.0.0-beta.2/extensions/version/README.md -### Bands - -We use the [STAC 1.1 Bands Object](https://github.com/radiantearth/stac-spec/pull/1254) for representing bands information, including nodata value, data type, and common band names. Only bands used to train or fine tune the model should be included in this list. - -A deviation is that we do not include the [Statistics](https://github.com/radiantearth/stac-spec/pull/1254/files#diff-2477b726f8c5d5d1c8b391be056db325e6918e78a24b414ccd757c7fbd574079R294) object at the band object level, but at the Model Input level. This is because in machine learning, we typically only need statistics for the dataset used to train the model in order to normalize any given bands input. - ### Model Input | Field Name | Type | Description | |-------------------------|---------------------------------|---------------------------------------------------------------------------------------------------------------------------------| -| name | string | Python name of the input variable. | -| band_names | list of common metadata band names | Specifies the ordering of the bands selected from the bands list described in [bands](#Bands). | -| input_tensors | [Tensor Object](#tensor-object) | Shape of the input tensor ($N \times C \times H \times W$). | +| name | string | Informative name of the input variable. 
Example "RGB Time Series" | +| bands | [Band](#bands) | Describes the EO data used to train or fine-tune the model. | +| input_arrays | [Array](#array) | Shape of the input arrays/tensors ($N \times C \times H \times W$). | | params | dict | dictionary with names for the parameters and their values. some models may take scalars or other non-tensor inputs. | -| scaling_factor | number | Scaling factor to apply to get data within `[0,1]`. For instance `scaling_factor=0.004` for 8-bit data. | +| scaling_factor | number | Scaling factor to apply to get data within a `[0,1]` range. For instance `scaling_factor=0.004` for 8-bit data. | | norm_by_channel | string | Whether to normalize each channel by channel-wise statistics or to normalize by dataset statistics. "True" or "False" | | norm_type | string | Normalization method. Select one option from "min_max", "z_score", "max_norm", "mean_norm", "unit_variance", "none" | | rescale_type | string | High level descriptor of the rescaling method to change image shape. Select one option from "crop", "pad", "interpolation", "none". If your rescaling method combines more than one of these operations, provide the name of the operation instead| -| statistics | [Statistics Object](https://github.com/radiantearth/stac-spec/pull/1254/files#diff-2477b726f8c5d5d1c8b391be056db325e6918e78a24b414ccd757c7fbd574079R294) | Dataset statistics for the training dataset used to normalize the inputs. | +| statistics | [Statistics](https://github.com/radiantearth/stac-spec/pull/1254/files#diff-2477b726f8c5d5d1c8b391be056db325e6918e78a24b414ccd757c7fbd574079R294) | Dataset statistics for the training dataset used to normalize the inputs. | | pre_processing_function | string | A url to the preprocessing function where normalization and rescaling takes place, and any other significant operations. 
Or, instead, the function code path, for example: my_python_module_name:my_processing_function| -#### Tensor Object +#### Bands and Statistics + +We use the [STAC 1.1 Bands Object](https://github.com/radiantearth/stac-spec/pull/1254) for representing bands information, including nodata value, data type, and common band names. Only bands used to train or fine tune the model should be included in this list. + +A deviation is that we do not include the [Statistics](https://github.com/radiantearth/stac-spec/pull/1254/files#diff-2477b726f8c5d5d1c8b391be056db325e6918e78a24b414ccd757c7fbd574079R294) object at the band object level, but at the Model Input level. This is because in machine learning, we typically only need statistics for the dataset used to train the model in order to normalize any given bands input. +#### Array | Field Name | Type | Description | |------------|--------|-------------------------------------| | batch | number | Batch size dimension (must be > 0). | @@ -77,10 +76,10 @@ A deviation is that we do not include the [Statistics](https://github.com/radian | channels | number | Number of channels (must be > 0). | | height | number | Height of the tensor (must be > 0). | | width | number | Width of the tensor (must be > 0). | -|dim_order | string | How the above dimensions are ordered with the tensor. "bhw", "bchw", "bthw", "btchw", "bcthw" | +|dim_order | string | How the above dimensions are ordered with the tensor. "bhw", "bchw", "bthw", "btchw" | -### Architecture Object +### Architecture | Field Name | Type | Description | |-------------------------|---------|-------------------------------------------------------------| @@ -91,21 +90,21 @@ A deviation is that we do not include the [Statistics](https://github.com/radian | summary | string | Summary of the layers, can be the output of `print(model)`. | | pretrained | string | Indicates the source of the pretraining (ex: ImageNet). 
| -### Runtime Object +### Runtime | Field Name | Type | Description | |-----------------------|------------------------------------|------------------------------------------------------------------------------------------| | framework | string | Used framework (ex: PyTorch, TensorFlow). | | version | string | Framework version (some models require a specific version of the framework). | -| model_artifact | string | Blob storage URI, POSIX filepath in docker image, or other URI type to the model file. | +| model_asset | [Asset Object](https://github.com/radiantearth/stac-spec/blob/master/collection-spec/collection-spec.md#asset-object) | Common Metadata Collection level asset object containing URI to the model file. | | model_handler | string | Inference execution function. | | model_src_url | string | Url of the source code (ex: GitHub repo). | | model_commit_hash | string | Hash value pointing to a specific version of the code. | -| docker | \[[Docker Object](#docker-object)] | Information for the deployment of the model in a docker instance. | +| docker | [Container](#container) | Information for the deployment of the model in a docker instance. | | batch_size_suggestion | number | A suggested batch size for a given compute instance type | | hardware_suggestion | str | A suggested cloud instance type or accelerator model | -#### Docker Object +#### Container | Field Name | Type | Description | |-------------|---------|-------------------------------------------------------| @@ -116,7 +115,7 @@ A deviation is that we do not include the [Statistics](https://github.com/radian | run | string | Running command. | | accelerator | boolean | True if the docker image requires a custom accelerator (CPU,TPU,MPS). 
| -### Output Object +### Output | Field Name | Type | Description | |--------------------------|-------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| diff --git a/stac_model/stac_model/__main__.py b/stac_model/stac_model/__main__.py index 5032827..5475a42 100644 --- a/stac_model/stac_model/__main__.py +++ b/stac_model/stac_model/__main__.py @@ -33,12 +33,17 @@ def main( ) -> None: """Generate example spec.""" - input_sig = TensorSignature( - name="input_tensor", dtype="float32", shape=(-1, 13, 64, 64) + input_array = Array( + dtype="float32", shape=[-1, 13, 64, 64], dim_ordering="bchw" ) - output_sig = TensorSignature(name="output_tensor", dtype="float32", shape=(-1, 10)) - model_sig = ModelSignature(inputs=[input_sig], outputs=[output_sig]) - model_artifact = ModelArtifact(path="s3://example/s3/uri/model.pt") + + band_list = [] + bands = [Band(name=b, description = f"Band {b}", nodata=-9999, data_type="float32", unit="reflectance") for b in band_list] + + model_input = ModelInput(name= "13 Band Sentinel-2 Batch", bands=bands, input_array=input_array, norm_by_channel=False, ) + + + model_artifact = ModelAsset(path="s3://example/s3/uri/model.pt") class_map = ClassMap( class_to_label_id={ "Annual Crop": 0, @@ -53,6 +58,7 @@ def main( "SeaLake": 9, } ) + output_sig = Array(name="output_tensor", dtype="float32", shape=(-1, 10)) meta = ModelMetadata( name="eurosat", class_map=class_map, diff --git a/stac_model/stac_model/input.py b/stac_model/stac_model/input.py new file mode 100644 index 0000000..1aa7c66 --- /dev/null +++ b/stac_model/stac_model/input.py @@ -0,0 +1,39 @@ +from typing import List, Optional, Literal, Dict, Literal +from pydantic import ( + BaseModel, + Field, + AnyUrl +) +class Array(BaseModel): + shape: Optional[List[int]] + dim_order: Literal["bhw", "bchw", "bthw", "btchw"] + dtype: str = Field(..., 
regex="^(uint8|uint16|int16|int32|float16|float32|float64)$") + +class Statistics(BaseModel): + minimum: List[float | int] + maximum: List[float | int] + mean: List[float] + stddev: List[float] + count: List[int] + valid_percent: List[float] + +class Band(BaseModel): + name: str + description: str + nodata: float | int | str + data_type: str + unit: Optional[str] + +class ModelInput(BaseModel): + name: str + bands: List[Band] + input_array: Array + params: Optional[ + Dict[str, int | float | str] + ] = None + scaling_factor: float + norm_by_channel: bool + norm_type: Literal["min_max", "z_score", "max_norm", "mean_norm", "unit_variance", "none"] + rescale_type: Literal["crop", "pad", "interpolation", "none"] + statistics: Optional[Statistics] + pre_processing_function: str | AnyUrl diff --git a/stac_model/stac_model/main.py b/stac_model/stac_model/main.py deleted file mode 100644 index eb01278..0000000 --- a/stac_model/stac_model/main.py +++ /dev/null @@ -1,121 +0,0 @@ -from pydantic import BaseModel, Field, AnyUrl -from typing import List, Optional, Literal, Any, List, Tuple, Dict, Optional, Literal -from pydantic import ( - BaseModel, - Field, - AnyUrl -) -from enum import Enum -from .runtime import ModelArtifact - -class Band(BaseModel): - name: str - description: str - nodata: float | int | str - data_type: str - unit: Optional[str] - -class TensorObject(BaseModel): - batch: int = Field(..., gt=0) - time: Optional[int] = Field(..., gt=0) - channels: Optional[int] = Field(..., gt=0) - height: int = Field(..., gt=0) - width: int = Field(..., gt=0) - dim_order: Literal["bhw", "bchw", "bthw", "btchw", "bcthw"] - -class Statistics(BaseModel): - minimum: List[float | int] - maximum: List[float | int] - mean: List[float] - stddev: List[float] - count: List[int] - valid_percent: List[float] - -class ModelInput(BaseModel): - name: str - band_names: List[str] - input_tensors: TensorObject - params: Optional[ - Dict[str, int | float | str] - ] = None - scaling_factor: 
float - norm_by_channel: str - norm_type: Literal["min_max", "z_score", "max_norm", "mean_norm", "unit_variance", "none"] - rescale_type: Literal["crop", "pad", "interpolation", "none"] - statistics: Optional[Statistics] - pre_processing_function: str | AnyUrl - -class ArchitectureObject(BaseModel): - total_parameters: int - on_disk_size_mb: float - ram_size_mb: float - model_type: str - summary: str - pretrained: str - - -class DockerObject(BaseModel): - docker_file: str - image_name: str - tag: str - working_dir: str - run: str - accelerator: bool - -class RuntimeObject(BaseModel): - framework: str - version: str - model_artifact: ModelArtifact - model_handler: str - model_src_url: str - model_commit_hash: str - docker: List[DockerObject] - batch_size_suggestion: int - hardware_suggestion: str | AnyUrl - -class TaskEnum(str, Enum): - regression = "regression" - classification = "classification" - object_detection = "object detection" - semantic_segmentation = "semantic segmentation" - instance_segmentation = "instance segmentation" - panoptic_segmentation = "panoptic segmentation" - multi_modal = "multi-modal" - similarity_search = "similarity search" - image_captioning = "image captioning" - generative = "generative" - -class ClassMap(BaseModel): - class_to_label_id: Dict[str, int] - - # Property to reverse the mapping - @property - def label_id_to_class(self) -> Dict[int, str]: - # Reverse the mapping - return {v: k for k, v in self.class_to_label_id.items()} - - def get_class(self, class_id: int) -> str: - """Get class name from class id.""" - if class_id not in self.label_id_to_class: - raise ValueError(f"Class ID '{class_id}' not found") - return self.label_id_to_class[class_id] - - def get_label_id(self, class_name: str) -> int: - """Get class id from class name.""" - if class_name not in self.class_to_label_id: - raise ValueError(f"Class name '{class_name}' not found") - return self.class_to_label_id[class_name] - -class OutputObject(BaseModel): - task: 
TaskEnum - number_of_classes: int - final_layer_size: List[int] - class_name_mapping: ClassMap - post_processing_function: str - -class DeepLearningModelExtension(BaseModel): - bands: List[Band] - dlm_input: ModelInput - dlm_architecture: ArchitectureObject - dlm_runtime: RuntimeObject - dlm_output: OutputObject diff --git a/stac_model/stac_model/output.py b/stac_model/stac_model/output.py new file mode 100644 index 0000000..a93150c --- /dev/null +++ b/stac_model/stac_model/output.py @@ -0,0 +1,30 @@ +from pydantic import BaseModel +from typing import List, Dict +from enum import Enum +class TaskEnum(str, Enum): + regression = "regression" + classification = "classification" + object_detection = "object detection" + semantic_segmentation = "semantic segmentation" + instance_segmentation = "instance segmentation" + panoptic_segmentation = "panoptic segmentation" + multi_modal = "multi-modal" + similarity_search = "similarity search" + image_captioning = "image captioning" + generative = "generative" + +class ClassMap(BaseModel): + class_to_label_id: Dict[str, int] + + # Property to reverse the mapping + @property + def label_id_to_class(self) -> Dict[int, str]: + # Reverse the mapping + return {v: k for k, v in self.class_to_label_id.items()} + +class ModelOutput(BaseModel): + task: TaskEnum + number_of_classes: int + final_layer_size: List[int] + class_name_mapping: ClassMap.class_to_label_id + post_processing_function: str diff --git a/stac_model/stac_model/runtime.py b/stac_model/stac_model/runtime.py index 6c1f830..f19c0e6 100644 --- a/stac_model/stac_model/runtime.py +++ b/stac_model/stac_model/runtime.py @@ -1,16 +1,22 @@ from .paths import S3Path -from pydantic import BaseModel, Field, FilePath, field_validator -from typing import Optional, Dict -class ModelArtifact(BaseModel): - """Information about the model location and other additional file locations.""" +from pydantic import BaseModel, Field, FilePath, AnyUrl, field_validator +from typing import 
Optional, List +class ModelAsset(BaseModel): + """Information about the model location and other additional file locations. Follows + the Asset Object spec: https://github.com/radiantearth/stac-spec/blob/master/collection-spec/collection-spec.md#asset-object + """ + + href: S3Path | FilePath | str = Field(...) + title: Optional[str] = Field(None) + description: Optional[str] = Field(None) + type: Optional[str] = Field(None) + roles: Optional[List[str]] = Field(None) - path: S3Path | FilePath | str = Field(...) - additional_files: Optional[Dict[str, FilePath]] = None class Config: arbitrary_types_allowed = True - @field_validator("path") + @field_validator("href") @classmethod def check_path_type(cls, v): if isinstance(v, str): @@ -23,3 +29,22 @@ def check_path_type(cls, v): f"Expected str, S3Path, or FilePath input, received {type(v).__name__}" ) return v + +class ContainerInfo(BaseModel): + container_file: str + image_name: str + tag: str + working_dir: str + run: str + accelerator: bool + +class Runtime(BaseModel): + framework: str + version: str + model_asset: ModelAsset + model_handler: str + model_src_url: str + model_commit_hash: str + container: List[ContainerInfo] + batch_size_suggestion: int + hardware_suggestion: str | AnyUrl diff --git a/stac_model/stac_model/schema.py b/stac_model/stac_model/schema.py new file mode 100644 index 0000000..890199e --- /dev/null +++ b/stac_model/stac_model/schema.py @@ -0,0 +1,20 @@ +from pydantic import BaseModel +from .input import ModelInput, Array, Band +from .output import ModelOutput, ClassMap +from .runtime import Runtime, ModelAsset + +class Architecture(BaseModel): + total_parameters: int + on_disk_size_mb: float + ram_size_mb: float + model_type: str + summary: str + pretrained: str + +class MLModel(BaseModel): + mlm_input: ModelInput + mlm_architecture: Architecture + mlm_runtime: Runtime + mlm_output: ModelOutput + +__all__ = ["MLModel", "ModelInput", "Array", "Band", "ModelOutput", "ModelAsset", "ClassMap", 
"Runtime", "Architecture"] From f2ccf4cb4e1f8e602492b65c3547d49924cbe80e Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Fri, 5 Jan 2024 16:36:00 -0800 Subject: [PATCH 006/112] mostly finish filling out object models --- stac_model/stac_model/__main__.py | 33 ++++++++++++++----------------- stac_model/stac_model/input.py | 30 ++++++++++++++-------------- stac_model/stac_model/output.py | 8 ++++---- stac_model/stac_model/schema.py | 14 +++++++------ 4 files changed, 42 insertions(+), 43 deletions(-) diff --git a/stac_model/stac_model/__main__.py b/stac_model/stac_model/__main__.py index 5475a42..87fc704 100644 --- a/stac_model/stac_model/__main__.py +++ b/stac_model/stac_model/__main__.py @@ -33,17 +33,21 @@ def main( ) -> None: """Generate example spec.""" - input_array = Array( + input_array = InputArray( dtype="float32", shape=[-1, 13, 64, 64], dim_ordering="bchw" ) - band_list = [] bands = [Band(name=b, description = f"Band {b}", nodata=-9999, data_type="float32", unit="reflectance") for b in band_list] - - model_input = ModelInput(name= "13 Band Sentinel-2 Batch", bands=bands, input_array=input_array, norm_by_channel=False, ) - - - model_artifact = ModelAsset(path="s3://example/s3/uri/model.pt") + stats = Statistics(mean=[1354.40546513, 1118.24399958, 1042.92983953, 947.62620298, 1199.47283961, + 1999.79090914, 2369.22292565, 2296.82608323, 732.08340178, 12.11327804, + 1819.01027855, 1118.92391149, 2594.14080798], + stddev= [245.71762908, 333.00778264, 395.09249139, 593.75055589, 566.4170017, + 861.18399006, 1086.63139075, 1117.98170791, 404.91978886, 4.77584468, + 1002.58768311, 761.30323499, 1231.58581042]) + mlm_input = ModelInput(name= "13 Band Sentinel-2 Batch", bands=bands, input_array=input_array, norm_by_channel=True, norm_type="z_score", rescale_type="None", statistics=stats, pre_processing_function = "https://github.com/microsoft/torchgeo/blob/545abe8326efc2848feae69d0212a15faba3eb00/torchgeo/datamodules/eurosat.py") + mlm_architecture = 
Architecture(name = "ResNet-18", total_parameters= 11_700_000, model_type= "torch", summary= "Sourced from torchgeo python library, identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO", pretrained= True) + mlm_runtime = Runtime(framework= "torch", version= "2.1.2+cu121", model_asset= ModelAsset(href= "https://huggingface.co/torchgeo/resnet18_sentinel2_all_moco/resolve/main/resnet18_sentinel2_all_moco-59bfdff9.pth"), + model_handler= "torchgeo.models.resnet.ResNet18", model_src_url= "https://github.com/huggingface/pytorch-image-models/blob/b5a4fa9c3be6ac732807db7e87d176f5e4fc06f1/timm/models/resnet.py#L362") class_map = ClassMap( class_to_label_id={ "Annual Crop": 0, @@ -58,18 +62,11 @@ def main( "SeaLake": 9, } ) - output_sig = Array(name="output_tensor", dtype="float32", shape=(-1, 10)) - meta = ModelMetadata( - name="eurosat", - class_map=class_map, - signatures=model_sig, - artifact=model_artifact, - ml_model_processor_type="cpu", - ) - json_str = meta.model_dump_json(indent=2) + mlm_output = ModelOutput(task= "classification", number_of_classes= 10, output_shape=[-1, 10], class_name_mapping= class_map.class_to_label_id) + ml_model_meta = MLModel(mlm_input, mlm_architecture, mlm_runtime, mlm_output) + json_str = ml_model_meta.model_dump_json(indent=2) with open("example.json", "w") as file: file.write(json_str) - print(meta) - + print(ml_model_meta.model_dump_yaml(indent=2) if __name__ == "__main__": app() diff --git a/stac_model/stac_model/input.py b/stac_model/stac_model/input.py index 1aa7c66..71f998c 100644 --- a/stac_model/stac_model/input.py +++ b/stac_model/stac_model/input.py @@ -1,21 +1,21 @@ -from typing import List, Optional, Literal, Dict, Literal +from typing import List, Optional, Literal, Dict, Literal, Union from pydantic import ( BaseModel, Field, AnyUrl ) -class Array(BaseModel): - shape: Optional[List[int]] +class InputArray(BaseModel): + shape: List[Union[int,float]] dim_order: Literal["bhw", "bchw", "bthw", "btchw"] dtype: str = Field(..., 
regex="^(uint8|uint16|int16|int32|float16|float32|float64)$") class Statistics(BaseModel): - minimum: List[float | int] - maximum: List[float | int] - mean: List[float] - stddev: List[float] - count: List[int] - valid_percent: List[float] + minimum: Optional[List[Union[float, int]]] + maximum: Optional[List[Union[float, int]]] + mean: Optional[List[float]] + stddev: Optional[List[float]] + count: Optional[List[int]] + valid_percent: Optional[List[float]] class Band(BaseModel): name: str @@ -27,13 +27,13 @@ class Band(BaseModel): class ModelInput(BaseModel): name: str bands: List[Band] - input_array: Array + input_array: InputArray + norm_type: Literal["min_max", "z_score", "max_norm", "mean_norm", "unit_variance", "none"] + rescale_type: Literal["crop", "pad", "interpolation", "none"] + norm_by_channel: bool params: Optional[ Dict[str, int | float | str] ] = None - scaling_factor: float - norm_by_channel: bool - norm_type: Literal["min_max", "z_score", "max_norm", "mean_norm", "unit_variance", "none"] - rescale_type: Literal["crop", "pad", "interpolation", "none"] + scaling_factor: Optional[float] statistics: Optional[Statistics] - pre_processing_function: str | AnyUrl + pre_processing_function: Optional[str | AnyUrl] diff --git a/stac_model/stac_model/output.py b/stac_model/stac_model/output.py index a93150c..00c0d82 100644 --- a/stac_model/stac_model/output.py +++ b/stac_model/stac_model/output.py @@ -1,5 +1,5 @@ from pydantic import BaseModel -from typing import List, Dict +from typing import List, Dict, Union, Optional from enum import Enum class TaskEnum(str, Enum): regression = "regression" @@ -25,6 +25,6 @@ def label_id_to_class(self) -> Dict[int, str]: class ModelOutput(BaseModel): task: TaskEnum number_of_classes: int - final_layer_size: List[int] - class_name_mapping: ClassMap.class_to_label_id - post_processing_function: str + output_shape: List[Union[int,float]] + class_name_mapping: Optional[ClassMap.class_to_label_id] + post_processing_function: 
Optional[str] diff --git a/stac_model/stac_model/schema.py b/stac_model/stac_model/schema.py index 890199e..000af71 100644 --- a/stac_model/stac_model/schema.py +++ b/stac_model/stac_model/schema.py @@ -1,15 +1,17 @@ from pydantic import BaseModel -from .input import ModelInput, Array, Band +from typing import Optional +from .input import ModelInput, InputArray, Band, Statistics from .output import ModelOutput, ClassMap from .runtime import Runtime, ModelAsset class Architecture(BaseModel): - total_parameters: int - on_disk_size_mb: float - ram_size_mb: float + name: str model_type: str summary: str - pretrained: str + pretrained: bool + total_parameters: Optional[int] + on_disk_size_mb: Optional[float] + ram_size_mb: Optional[float] class MLModel(BaseModel): mlm_input: ModelInput @@ -17,4 +19,4 @@ class MLModel(BaseModel): mlm_runtime: Runtime mlm_output: ModelOutput -__all__ = ["MLModel", "ModelInput", "Array", "Band", "ModelOutput", "ModelAsset", "ClassMap", "Runtime", "Architecture"] +__all__ = ["MLModel", "ModelInput", "InputArray", "Band", "Statistics", "ModelOutput", "ModelAsset", "ClassMap", "Runtime", "ContainerInfo", "Model Asset", "Architecture"] From e81bee9639d876f418044a950a9b17e7473a9473 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Fri, 5 Jan 2024 16:37:37 -0800 Subject: [PATCH 007/112] some changes to fields and language edits --- README.md | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 45e133c..06ca73c 100644 --- a/README.md +++ b/README.md @@ -20,6 +20,8 @@ This document explains the Template Extension to the [SpatioTemporal Asset Catal 4. An optional, flexible description of the runtime environment to be able to run the model 5. Scientific references +Note: The spec is biased towards supervised ML models the produce classifications. However, fields that relate to supervised ML are optional and users can use the fields they need for different tasks. 
+ Check the original technical report for an earlier version of the Model Extension [here](https://github.com/crim-ca/CCCOT03/raw/main/CCCOT03_Rapport%20Final_FINAL_EN.pdf) for more details. ![Image Description](https://i.imgur.com/cVAg5sA.png) @@ -53,8 +55,8 @@ In addition, fields from the following extensions must be imported in the item: |-------------------------|---------------------------------|---------------------------------------------------------------------------------------------------------------------------------| | name | string | Informative name of the input variable. Example "RGB Time Series" | | bands | [Band](#bands) | Describes the EO data used to train or fine-tune the model. | -| input_arrays | [Array](#array) | Shape of the input arrays/tensors ($N \times C \times H \times W$). | -| params | dict | dictionary with names for the parameters and their values. some models may take scalars or other non-tensor inputs. | +| input_array | [Array](#array) | Shape of the input array/tensor ($N \times C \times H \times W$). | +| params | dict | dictionary with names for the parameters and their values. some models may take multiple input arrays, scalars, other non-tensor inputs. | | scaling_factor | number | Scaling factor to apply to get data within a `[0,1]` range. For instance `scaling_factor=0.004` for 8-bit data. | | norm_by_channel | string | Whether to normalize each channel by channel-wise statistics or to normalize by dataset statistics. "True" or "False" | | norm_type | string | Normalization method. Select one option from "min_max", "z_score", "max_norm", "mean_norm", "unit_variance", "none" | @@ -71,24 +73,22 @@ A deviation is that we do not include the [Statistics](https://github.com/radian #### Array | Field Name | Type | Description | |------------|--------|-------------------------------------| -| batch | number | Batch size dimension (must be > 0). | -| time | number | Number of timesteps (must be > 0). 
| -| channels | number | Number of channels (must be > 0). | -| height | number | Height of the tensor (must be > 0). | -| width | number | Width of the tensor (must be > 0). | -|dim_order | string | How the above dimensions are ordered with the tensor. "bhw", "bchw", "bthw", "btchw" | +| shape | [integer] | Shape of the input array, including the batch size dimension. The batch size dimension must either be greater than 0 or -1 to indicate an unspecified batch dimension size. | +|dim_order | string | How the above dimensions are ordered with the tensor. "bhw", "bchw", "bthw", "btchw" are valid orderings where b=batch, c=channel, t=time, h=height, w=width| +|dtype | string | The data type of values in the array. Suggested to use [Numpy numerical types](https://numpy.org/devdocs/user/basics.types.html), omitting the numpy module, e.g. "float32" | ### Architecture | Field Name | Type | Description | |-------------------------|---------|-------------------------------------------------------------| -| total_parameters | integer | Total number of parameters. | -| on_disk_size_mb | number | The memory size on disk of the model artifact (MB). | -| ram_size_mb | number | number | The memory size in accelerator memory during inference (MB).| +| name | string | The name of the model architecture. For example, "ResNet-18" or "Random Forest" | | model_type | string | Type of network (ex: ResNet-18). | | summary | string | Summary of the layers, can be the output of `print(model)`. | | pretrained | string | Indicates the source of the pretraining (ex: ImageNet). | +| total_parameters | integer | Total number of parameters. | +| on_disk_size_mb | number | The memory size on disk of the model artifact (MB). 
| +| ram_size_mb | number | number | The memory size in accelerator memory during inference (MB).| ### Runtime @@ -121,7 +121,7 @@ A deviation is that we do not include the [Statistics](https://github.com/radian |--------------------------|-------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | task | [Task Enum](#task-enum) | Specifies the Machine Learning task for which the output can be used for. | | number_of_classes | integer | Number of classes. | -| final_layer_size | \[integer] | Sizes of the output tensor as ($N \times C \times H \times W$). | +| output_shape | \[integer] | Shape of the output array/tensor from the model For example ($N \times H \times W$). Use -1 to indicate variable dimensions, like the batch dimension. | | class_name_mapping | dict | Mapping of the output index to a short class name, for each record we specify the index and the class name. | | post_processing_function | string | A url to the postprocessing function where normalization and rescaling takes place, and any other significant operations. 
Or, instead, the function code path, for example: my_python_module_name:my_processing_function| From 964535402e76f4d4e1cce564f3c635fe6caa8822 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Fri, 5 Jan 2024 17:34:54 -0800 Subject: [PATCH 008/112] poetry run stac-model generates json example --- stac_model/example.json | 110 +++--- stac_model/poetry.lock | 533 ++++++++++++++++-------------- stac_model/pyproject.toml | 6 +- stac_model/stac_model/__main__.py | 23 +- stac_model/stac_model/input.py | 24 +- stac_model/stac_model/output.py | 7 +- stac_model/stac_model/paths.py | 4 + stac_model/stac_model/runtime.py | 28 +- stac_model/stac_model/schema.py | 19 +- 9 files changed, 404 insertions(+), 350 deletions(-) diff --git a/stac_model/example.json b/stac_model/example.json index df5bc45..1b54835 100644 --- a/stac_model/example.json +++ b/stac_model/example.json @@ -1,36 +1,77 @@ { - "signatures": { - "inputs": [ - { - "name": "input_tensor", - "dtype": "float32", - "shape": [ - -1, - 13, - 64, - 64 - ] - } - ], - "outputs": [ - { - "name": "output_tensor", - "dtype": "float32", - "shape": [ - -1, - 10 - ] - } - ], - "params": null + "mlm_input": { + "name": "13 Band Sentinel-2 Batch", + "bands": [], + "input_array": { + "shape": [ + -1, + 13, + 64, + 64 + ], + "dim_order": "bchw", + "dtype": "float32" + }, + "norm_type": "z_score", + "rescale_type": "none", + "norm_by_channel": true, + "statistics": { + "mean": [ + 1354.40546513, + 1118.24399958, + 1042.92983953, + 947.62620298, + 1199.47283961, + 1999.79090914, + 2369.22292565, + 2296.82608323, + 732.08340178, + 12.11327804, + 1819.01027855, + 1118.92391149, + 2594.14080798 + ], + "stddev": [ + 245.71762908, + 333.00778264, + 395.09249139, + 593.75055589, + 566.4170017, + 861.18399006, + 1086.63139075, + 1117.98170791, + 404.91978886, + 4.77584468, + 1002.58768311, + 761.30323499, + 1231.58581042 + ] + }, + "pre_processing_function": 
"https://github.com/microsoft/torchgeo/blob/545abe8326efc2848feae69d0212a15faba3eb00/torchgeo/datamodules/eurosat.py" + }, + "mlm_architecture": { + "name": "ResNet-18", + "summary": "Sourced from torchgeo python library, identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO", + "pretrained": true, + "total_parameters": 11700000 }, - "artifact": { - "path": "s3://example/s3/uri/model.pt", - "additional_files": null + "mlm_runtime": { + "framework": "torch", + "version": "2.1.2+cu121", + "asset": { + "href": "." + }, + "source_code_url": "https://github.com/huggingface/pytorch-image-models/blob/b5a4fa9c3be6ac732807db7e87d176f5e4fc06f1/timm/models/resnet.py#L362", + "handler": "torchgeo.models.resnet.ResNet18" }, - "id": "3fa03dceb4004b6e8a9e8591e4b3a99d", - "class_map": { - "class_to_label_id": { + "mlm_output": { + "task": "classification", + "number_of_classes": 10, + "output_shape": [ + -1, + 10 + ], + "class_name_mapping": { "Annual Crop": 0, "Forest": 1, "Herbaceous Vegetation": 2, @@ -42,12 +83,5 @@ "River": 8, "SeaLake": 9 } - }, - "runtime_config": null, - "name": "eurosat", - "ml_model_type": null, - "ml_model_processor_type": "cpu", - "ml_model_learning_approach": null, - "ml_model_prediction_type": null, - "ml_model_architecture": null + } } diff --git a/stac_model/poetry.lock b/stac_model/poetry.lock index 312b915..af058e3 100644 --- a/stac_model/poetry.lock +++ b/stac_model/poetry.lock @@ -180,79 +180,65 @@ files = [ {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] -[[package]] -name = "commonmark" -version = "0.9.1" -description = "Python parser for the CommonMark Markdown spec" -optional = false -python-versions = "*" -files = [ - {file = "commonmark-0.9.1-py2.py3-none-any.whl", hash = "sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9"}, - {file = "commonmark-0.9.1.tar.gz", hash = "sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60"}, -] - 
-[package.extras] -test = ["flake8 (==3.7.8)", "hypothesis (==3.55.3)"] - [[package]] name = "coverage" -version = "7.3.2" +version = "7.4.0" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.8" files = [ - {file = "coverage-7.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d872145f3a3231a5f20fd48500274d7df222e291d90baa2026cc5152b7ce86bf"}, - {file = "coverage-7.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:310b3bb9c91ea66d59c53fa4989f57d2436e08f18fb2f421a1b0b6b8cc7fffda"}, - {file = "coverage-7.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f47d39359e2c3779c5331fc740cf4bce6d9d680a7b4b4ead97056a0ae07cb49a"}, - {file = "coverage-7.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aa72dbaf2c2068404b9870d93436e6d23addd8bbe9295f49cbca83f6e278179c"}, - {file = "coverage-7.3.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:beaa5c1b4777f03fc63dfd2a6bd820f73f036bfb10e925fce067b00a340d0f3f"}, - {file = "coverage-7.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:dbc1b46b92186cc8074fee9d9fbb97a9dd06c6cbbef391c2f59d80eabdf0faa6"}, - {file = "coverage-7.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:315a989e861031334d7bee1f9113c8770472db2ac484e5b8c3173428360a9148"}, - {file = "coverage-7.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d1bc430677773397f64a5c88cb522ea43175ff16f8bfcc89d467d974cb2274f9"}, - {file = "coverage-7.3.2-cp310-cp310-win32.whl", hash = "sha256:a889ae02f43aa45032afe364c8ae84ad3c54828c2faa44f3bfcafecb5c96b02f"}, - {file = "coverage-7.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:c0ba320de3fb8c6ec16e0be17ee1d3d69adcda99406c43c0409cb5c41788a611"}, - {file = "coverage-7.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ac8c802fa29843a72d32ec56d0ca792ad15a302b28ca6203389afe21f8fa062c"}, - {file = 
"coverage-7.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:89a937174104339e3a3ffcf9f446c00e3a806c28b1841c63edb2b369310fd074"}, - {file = "coverage-7.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e267e9e2b574a176ddb983399dec325a80dbe161f1a32715c780b5d14b5f583a"}, - {file = "coverage-7.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2443cbda35df0d35dcfb9bf8f3c02c57c1d6111169e3c85fc1fcc05e0c9f39a3"}, - {file = "coverage-7.3.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4175e10cc8dda0265653e8714b3174430b07c1dca8957f4966cbd6c2b1b8065a"}, - {file = "coverage-7.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0cbf38419fb1a347aaf63481c00f0bdc86889d9fbf3f25109cf96c26b403fda1"}, - {file = "coverage-7.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:5c913b556a116b8d5f6ef834038ba983834d887d82187c8f73dec21049abd65c"}, - {file = "coverage-7.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1981f785239e4e39e6444c63a98da3a1db8e971cb9ceb50a945ba6296b43f312"}, - {file = "coverage-7.3.2-cp311-cp311-win32.whl", hash = "sha256:43668cabd5ca8258f5954f27a3aaf78757e6acf13c17604d89648ecc0cc66640"}, - {file = "coverage-7.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10c39c0452bf6e694511c901426d6b5ac005acc0f78ff265dbe36bf81f808a2"}, - {file = "coverage-7.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:4cbae1051ab791debecc4a5dcc4a1ff45fc27b91b9aee165c8a27514dd160836"}, - {file = "coverage-7.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:12d15ab5833a997716d76f2ac1e4b4d536814fc213c85ca72756c19e5a6b3d63"}, - {file = "coverage-7.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c7bba973ebee5e56fe9251300c00f1579652587a9f4a5ed8404b15a0471f216"}, - {file = 
"coverage-7.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fe494faa90ce6381770746077243231e0b83ff3f17069d748f645617cefe19d4"}, - {file = "coverage-7.3.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6e9589bd04d0461a417562649522575d8752904d35c12907d8c9dfeba588faf"}, - {file = "coverage-7.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d51ac2a26f71da1b57f2dc81d0e108b6ab177e7d30e774db90675467c847bbdf"}, - {file = "coverage-7.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:99b89d9f76070237975b315b3d5f4d6956ae354a4c92ac2388a5695516e47c84"}, - {file = "coverage-7.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:fa28e909776dc69efb6ed975a63691bc8172b64ff357e663a1bb06ff3c9b589a"}, - {file = "coverage-7.3.2-cp312-cp312-win32.whl", hash = "sha256:289fe43bf45a575e3ab10b26d7b6f2ddb9ee2dba447499f5401cfb5ecb8196bb"}, - {file = "coverage-7.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:7dbc3ed60e8659bc59b6b304b43ff9c3ed858da2839c78b804973f613d3e92ed"}, - {file = "coverage-7.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f94b734214ea6a36fe16e96a70d941af80ff3bfd716c141300d95ebc85339738"}, - {file = "coverage-7.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:af3d828d2c1cbae52d34bdbb22fcd94d1ce715d95f1a012354a75e5913f1bda2"}, - {file = "coverage-7.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:630b13e3036e13c7adc480ca42fa7afc2a5d938081d28e20903cf7fd687872e2"}, - {file = "coverage-7.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9eacf273e885b02a0273bb3a2170f30e2d53a6d53b72dbe02d6701b5296101c"}, - {file = "coverage-7.3.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8f17966e861ff97305e0801134e69db33b143bbfb36436efb9cfff6ec7b2fd9"}, - {file = 
"coverage-7.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b4275802d16882cf9c8b3d057a0839acb07ee9379fa2749eca54efbce1535b82"}, - {file = "coverage-7.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:72c0cfa5250f483181e677ebc97133ea1ab3eb68645e494775deb6a7f6f83901"}, - {file = "coverage-7.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:cb536f0dcd14149425996821a168f6e269d7dcd2c273a8bff8201e79f5104e76"}, - {file = "coverage-7.3.2-cp38-cp38-win32.whl", hash = "sha256:307adb8bd3abe389a471e649038a71b4eb13bfd6b7dd9a129fa856f5c695cf92"}, - {file = "coverage-7.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:88ed2c30a49ea81ea3b7f172e0269c182a44c236eb394718f976239892c0a27a"}, - {file = "coverage-7.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b631c92dfe601adf8f5ebc7fc13ced6bb6e9609b19d9a8cd59fa47c4186ad1ce"}, - {file = "coverage-7.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d3d9df4051c4a7d13036524b66ecf7a7537d14c18a384043f30a303b146164e9"}, - {file = "coverage-7.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f7363d3b6a1119ef05015959ca24a9afc0ea8a02c687fe7e2d557705375c01f"}, - {file = "coverage-7.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2f11cc3c967a09d3695d2a6f03fb3e6236622b93be7a4b5dc09166a861be6d25"}, - {file = "coverage-7.3.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:149de1d2401ae4655c436a3dced6dd153f4c3309f599c3d4bd97ab172eaf02d9"}, - {file = "coverage-7.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:3a4006916aa6fee7cd38db3bfc95aa9c54ebb4ffbfc47c677c8bba949ceba0a6"}, - {file = "coverage-7.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9028a3871280110d6e1aa2df1afd5ef003bab5fb1ef421d6dc748ae1c8ef2ebc"}, - {file = "coverage-7.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9f805d62aec8eb92bab5b61c0f07329275b6f41c97d80e847b03eb894f38d083"}, - 
{file = "coverage-7.3.2-cp39-cp39-win32.whl", hash = "sha256:d1c88ec1a7ff4ebca0219f5b1ef863451d828cccf889c173e1253aa84b1e07ce"}, - {file = "coverage-7.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b4767da59464bb593c07afceaddea61b154136300881844768037fd5e859353f"}, - {file = "coverage-7.3.2-pp38.pp39.pp310-none-any.whl", hash = "sha256:ae97af89f0fbf373400970c0a21eef5aa941ffeed90aee43650b81f7d7f47637"}, - {file = "coverage-7.3.2.tar.gz", hash = "sha256:be32ad29341b0170e795ca590e1c07e81fc061cb5b10c74ce7203491484404ef"}, + {file = "coverage-7.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:36b0ea8ab20d6a7564e89cb6135920bc9188fb5f1f7152e94e8300b7b189441a"}, + {file = "coverage-7.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0676cd0ba581e514b7f726495ea75aba3eb20899d824636c6f59b0ed2f88c471"}, + {file = "coverage-7.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0ca5c71a5a1765a0f8f88022c52b6b8be740e512980362f7fdbb03725a0d6b9"}, + {file = "coverage-7.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7c97726520f784239f6c62506bc70e48d01ae71e9da128259d61ca5e9788516"}, + {file = "coverage-7.4.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:815ac2d0f3398a14286dc2cea223a6f338109f9ecf39a71160cd1628786bc6f5"}, + {file = "coverage-7.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:80b5ee39b7f0131ebec7968baa9b2309eddb35b8403d1869e08f024efd883566"}, + {file = "coverage-7.4.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5b2ccb7548a0b65974860a78c9ffe1173cfb5877460e5a229238d985565574ae"}, + {file = "coverage-7.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:995ea5c48c4ebfd898eacb098164b3cc826ba273b3049e4a889658548e321b43"}, + {file = "coverage-7.4.0-cp310-cp310-win32.whl", hash = "sha256:79287fd95585ed36e83182794a57a46aeae0b64ca53929d1176db56aacc83451"}, + {file = 
"coverage-7.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:5b14b4f8760006bfdb6e08667af7bc2d8d9bfdb648351915315ea17645347137"}, + {file = "coverage-7.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:04387a4a6ecb330c1878907ce0dc04078ea72a869263e53c72a1ba5bbdf380ca"}, + {file = "coverage-7.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ea81d8f9691bb53f4fb4db603203029643caffc82bf998ab5b59ca05560f4c06"}, + {file = "coverage-7.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:74775198b702868ec2d058cb92720a3c5a9177296f75bd97317c787daf711505"}, + {file = "coverage-7.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:76f03940f9973bfaee8cfba70ac991825611b9aac047e5c80d499a44079ec0bc"}, + {file = "coverage-7.4.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:485e9f897cf4856a65a57c7f6ea3dc0d4e6c076c87311d4bc003f82cfe199d25"}, + {file = "coverage-7.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6ae8c9d301207e6856865867d762a4b6fd379c714fcc0607a84b92ee63feff70"}, + {file = "coverage-7.4.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:bf477c355274a72435ceb140dc42de0dc1e1e0bf6e97195be30487d8eaaf1a09"}, + {file = "coverage-7.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:83c2dda2666fe32332f8e87481eed056c8b4d163fe18ecc690b02802d36a4d26"}, + {file = "coverage-7.4.0-cp311-cp311-win32.whl", hash = "sha256:697d1317e5290a313ef0d369650cfee1a114abb6021fa239ca12b4849ebbd614"}, + {file = "coverage-7.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:26776ff6c711d9d835557ee453082025d871e30b3fd6c27fcef14733f67f0590"}, + {file = "coverage-7.4.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:13eaf476ec3e883fe3e5fe3707caeb88268a06284484a3daf8250259ef1ba143"}, + {file = "coverage-7.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:846f52f46e212affb5bcf131c952fb4075b55aae6b61adc9856222df89cbe3e2"}, + {file = "coverage-7.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26f66da8695719ccf90e794ed567a1549bb2644a706b41e9f6eae6816b398c4a"}, + {file = "coverage-7.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:164fdcc3246c69a6526a59b744b62e303039a81e42cfbbdc171c91a8cc2f9446"}, + {file = "coverage-7.4.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:316543f71025a6565677d84bc4df2114e9b6a615aa39fb165d697dba06a54af9"}, + {file = "coverage-7.4.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bb1de682da0b824411e00a0d4da5a784ec6496b6850fdf8c865c1d68c0e318dd"}, + {file = "coverage-7.4.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:0e8d06778e8fbffccfe96331a3946237f87b1e1d359d7fbe8b06b96c95a5407a"}, + {file = "coverage-7.4.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a56de34db7b7ff77056a37aedded01b2b98b508227d2d0979d373a9b5d353daa"}, + {file = "coverage-7.4.0-cp312-cp312-win32.whl", hash = "sha256:51456e6fa099a8d9d91497202d9563a320513fcf59f33991b0661a4a6f2ad450"}, + {file = "coverage-7.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:cd3c1e4cb2ff0083758f09be0f77402e1bdf704adb7f89108007300a6da587d0"}, + {file = "coverage-7.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e9d1bf53c4c8de58d22e0e956a79a5b37f754ed1ffdbf1a260d9dcfa2d8a325e"}, + {file = "coverage-7.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:109f5985182b6b81fe33323ab4707011875198c41964f014579cf82cebf2bb85"}, + {file = "coverage-7.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cc9d4bc55de8003663ec94c2f215d12d42ceea128da8f0f4036235a119c88ac"}, + {file = "coverage-7.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:cc6d65b21c219ec2072c1293c505cf36e4e913a3f936d80028993dd73c7906b1"}, + {file = "coverage-7.4.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a10a4920def78bbfff4eff8a05c51be03e42f1c3735be42d851f199144897ba"}, + {file = "coverage-7.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b8e99f06160602bc64da35158bb76c73522a4010f0649be44a4e167ff8555952"}, + {file = "coverage-7.4.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:7d360587e64d006402b7116623cebf9d48893329ef035278969fa3bbf75b697e"}, + {file = "coverage-7.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:29f3abe810930311c0b5d1a7140f6395369c3db1be68345638c33eec07535105"}, + {file = "coverage-7.4.0-cp38-cp38-win32.whl", hash = "sha256:5040148f4ec43644702e7b16ca864c5314ccb8ee0751ef617d49aa0e2d6bf4f2"}, + {file = "coverage-7.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:9864463c1c2f9cb3b5db2cf1ff475eed2f0b4285c2aaf4d357b69959941aa555"}, + {file = "coverage-7.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:936d38794044b26c99d3dd004d8af0035ac535b92090f7f2bb5aa9c8e2f5cd42"}, + {file = "coverage-7.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:799c8f873794a08cdf216aa5d0531c6a3747793b70c53f70e98259720a6fe2d7"}, + {file = "coverage-7.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e7defbb9737274023e2d7af02cac77043c86ce88a907c58f42b580a97d5bcca9"}, + {file = "coverage-7.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a1526d265743fb49363974b7aa8d5899ff64ee07df47dd8d3e37dcc0818f09ed"}, + {file = "coverage-7.4.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf635a52fc1ea401baf88843ae8708591aa4adff875e5c23220de43b1ccf575c"}, + {file = "coverage-7.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:756ded44f47f330666843b5781be126ab57bb57c22adbb07d83f6b519783b870"}, + 
{file = "coverage-7.4.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:0eb3c2f32dabe3a4aaf6441dde94f35687224dfd7eb2a7f47f3fd9428e421058"}, + {file = "coverage-7.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bfd5db349d15c08311702611f3dccbef4b4e2ec148fcc636cf8739519b4a5c0f"}, + {file = "coverage-7.4.0-cp39-cp39-win32.whl", hash = "sha256:53d7d9158ee03956e0eadac38dfa1ec8068431ef8058fe6447043db1fb40d932"}, + {file = "coverage-7.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:cfd2a8b6b0d8e66e944d47cdec2f47c48fef2ba2f2dff5a9a75757f64172857e"}, + {file = "coverage-7.4.0-pp38.pp39.pp310-none-any.whl", hash = "sha256:c530833afc4707fe48524a44844493f36d8727f04dcce91fb978c414a8556cc6"}, + {file = "coverage-7.4.0.tar.gz", hash = "sha256:707c0f58cb1712b8809ece32b68996ee1e609f71bd14615bd8f87a1293cb610e"}, ] [package.dependencies] @@ -263,13 +249,13 @@ toml = ["tomli"] [[package]] name = "distlib" -version = "0.3.7" +version = "0.3.8" description = "Distribution utilities" optional = false python-versions = "*" files = [ - {file = "distlib-0.3.7-py2.py3-none-any.whl", hash = "sha256:2e24928bc811348f0feb63014e97aaae3037f2cf48712d51ae61df7fd6075057"}, - {file = "distlib-0.3.7.tar.gz", hash = "sha256:9dafe54b34a028eafd95039d5e5d4851a13734540f1331060d31c9916e7147a8"}, + {file = "distlib-0.3.8-py2.py3-none-any.whl", hash = "sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784"}, + {file = "distlib-0.3.8.tar.gz", hash = "sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64"}, ] [[package]] @@ -399,6 +385,41 @@ files = [ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, ] +[[package]] +name = "markdown-it-py" +version = "3.0.0" +description = "Python port of markdown-it. Markdown parsing, done right!" 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, + {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, +] + +[package.dependencies] +mdurl = ">=0.1,<1.0" + +[package.extras] +benchmarking = ["psutil", "pytest", "pytest-benchmark"] +code-style = ["pre-commit (>=3.0,<4.0)"] +compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] +linkify = ["linkify-it-py (>=1,<3)"] +plugins = ["mdit-py-plugins"] +profiling = ["gprof2dot"] +rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] +testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] + +[[package]] +name = "mdurl" +version = "0.1.2" +description = "Markdown URL utilities" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, + {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, +] + [[package]] name = "mypy" version = "1.0.1" @@ -471,47 +492,47 @@ setuptools = "*" [[package]] name = "numpy" -version = "1.26.2" +version = "1.26.3" description = "Fundamental package for array computing in Python" optional = false python-versions = ">=3.9" files = [ - {file = "numpy-1.26.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3703fc9258a4a122d17043e57b35e5ef1c5a5837c3db8be396c82e04c1cf9b0f"}, - {file = "numpy-1.26.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cc392fdcbd21d4be6ae1bb4475a03ce3b025cd49a9be5345d76d7585aea69440"}, - {file = "numpy-1.26.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:36340109af8da8805d8851ef1d74761b3b88e81a9bd80b290bbfed61bd2b4f75"}, - {file = "numpy-1.26.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bcc008217145b3d77abd3e4d5ef586e3bdfba8fe17940769f8aa09b99e856c00"}, - {file = "numpy-1.26.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3ced40d4e9e18242f70dd02d739e44698df3dcb010d31f495ff00a31ef6014fe"}, - {file = "numpy-1.26.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b272d4cecc32c9e19911891446b72e986157e6a1809b7b56518b4f3755267523"}, - {file = "numpy-1.26.2-cp310-cp310-win32.whl", hash = "sha256:22f8fc02fdbc829e7a8c578dd8d2e15a9074b630d4da29cda483337e300e3ee9"}, - {file = "numpy-1.26.2-cp310-cp310-win_amd64.whl", hash = "sha256:26c9d33f8e8b846d5a65dd068c14e04018d05533b348d9eaeef6c1bd787f9919"}, - {file = "numpy-1.26.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b96e7b9c624ef3ae2ae0e04fa9b460f6b9f17ad8b4bec6d7756510f1f6c0c841"}, - {file = "numpy-1.26.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:aa18428111fb9a591d7a9cc1b48150097ba6a7e8299fb56bdf574df650e7d1f1"}, - {file = "numpy-1.26.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:06fa1ed84aa60ea6ef9f91ba57b5ed963c3729534e6e54055fc151fad0423f0a"}, - {file = "numpy-1.26.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96ca5482c3dbdd051bcd1fce8034603d6ebfc125a7bd59f55b40d8f5d246832b"}, - {file = "numpy-1.26.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:854ab91a2906ef29dc3925a064fcd365c7b4da743f84b123002f6139bcb3f8a7"}, - {file = "numpy-1.26.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f43740ab089277d403aa07567be138fc2a89d4d9892d113b76153e0e412409f8"}, - {file = "numpy-1.26.2-cp311-cp311-win32.whl", hash = "sha256:a2bbc29fcb1771cd7b7425f98b05307776a6baf43035d3b80c4b0f29e9545186"}, - {file = "numpy-1.26.2-cp311-cp311-win_amd64.whl", hash = "sha256:2b3fca8a5b00184828d12b073af4d0fc5fdd94b1632c2477526f6bd7842d700d"}, - {file = 
"numpy-1.26.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a4cd6ed4a339c21f1d1b0fdf13426cb3b284555c27ac2f156dfdaaa7e16bfab0"}, - {file = "numpy-1.26.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5d5244aabd6ed7f312268b9247be47343a654ebea52a60f002dc70c769048e75"}, - {file = "numpy-1.26.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a3cdb4d9c70e6b8c0814239ead47da00934666f668426fc6e94cce869e13fd7"}, - {file = "numpy-1.26.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa317b2325f7aa0a9471663e6093c210cb2ae9c0ad824732b307d2c51983d5b6"}, - {file = "numpy-1.26.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:174a8880739c16c925799c018f3f55b8130c1f7c8e75ab0a6fa9d41cab092fd6"}, - {file = "numpy-1.26.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f79b231bf5c16b1f39c7f4875e1ded36abee1591e98742b05d8a0fb55d8a3eec"}, - {file = "numpy-1.26.2-cp312-cp312-win32.whl", hash = "sha256:4a06263321dfd3598cacb252f51e521a8cb4b6df471bb12a7ee5cbab20ea9167"}, - {file = "numpy-1.26.2-cp312-cp312-win_amd64.whl", hash = "sha256:b04f5dc6b3efdaab541f7857351aac359e6ae3c126e2edb376929bd3b7f92d7e"}, - {file = "numpy-1.26.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4eb8df4bf8d3d90d091e0146f6c28492b0be84da3e409ebef54349f71ed271ef"}, - {file = "numpy-1.26.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1a13860fdcd95de7cf58bd6f8bc5a5ef81c0b0625eb2c9a783948847abbef2c2"}, - {file = "numpy-1.26.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:64308ebc366a8ed63fd0bf426b6a9468060962f1a4339ab1074c228fa6ade8e3"}, - {file = "numpy-1.26.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baf8aab04a2c0e859da118f0b38617e5ee65d75b83795055fb66c0d5e9e9b818"}, - {file = "numpy-1.26.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d73a3abcac238250091b11caef9ad12413dab01669511779bc9b29261dd50210"}, - {file = "numpy-1.26.2-cp39-cp39-musllinux_1_1_x86_64.whl", 
hash = "sha256:b361d369fc7e5e1714cf827b731ca32bff8d411212fccd29ad98ad622449cc36"}, - {file = "numpy-1.26.2-cp39-cp39-win32.whl", hash = "sha256:bd3f0091e845164a20bd5a326860c840fe2af79fa12e0469a12768a3ec578d80"}, - {file = "numpy-1.26.2-cp39-cp39-win_amd64.whl", hash = "sha256:2beef57fb031dcc0dc8fa4fe297a742027b954949cabb52a2a376c144e5e6060"}, - {file = "numpy-1.26.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1cc3d5029a30fb5f06704ad6b23b35e11309491c999838c31f124fee32107c79"}, - {file = "numpy-1.26.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94cc3c222bb9fb5a12e334d0479b97bb2df446fbe622b470928f5284ffca3f8d"}, - {file = "numpy-1.26.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:fe6b44fb8fcdf7eda4ef4461b97b3f63c466b27ab151bec2366db8b197387841"}, - {file = "numpy-1.26.2.tar.gz", hash = "sha256:f65738447676ab5777f11e6bbbdb8ce11b785e105f690bc45966574816b6d3ea"}, + {file = "numpy-1.26.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:806dd64230dbbfaca8a27faa64e2f414bf1c6622ab78cc4264f7f5f028fee3bf"}, + {file = "numpy-1.26.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:02f98011ba4ab17f46f80f7f8f1c291ee7d855fcef0a5a98db80767a468c85cd"}, + {file = "numpy-1.26.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6d45b3ec2faed4baca41c76617fcdcfa4f684ff7a151ce6fc78ad3b6e85af0a6"}, + {file = "numpy-1.26.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bdd2b45bf079d9ad90377048e2747a0c82351989a2165821f0c96831b4a2a54b"}, + {file = "numpy-1.26.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:211ddd1e94817ed2d175b60b6374120244a4dd2287f4ece45d49228b4d529178"}, + {file = "numpy-1.26.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b1240f767f69d7c4c8a29adde2310b871153df9b26b5cb2b54a561ac85146485"}, + {file = "numpy-1.26.3-cp310-cp310-win32.whl", hash = "sha256:21a9484e75ad018974a2fdaa216524d64ed4212e418e0a551a2d83403b0531d3"}, + {file = 
"numpy-1.26.3-cp310-cp310-win_amd64.whl", hash = "sha256:9e1591f6ae98bcfac2a4bbf9221c0b92ab49762228f38287f6eeb5f3f55905ce"}, + {file = "numpy-1.26.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b831295e5472954104ecb46cd98c08b98b49c69fdb7040483aff799a755a7374"}, + {file = "numpy-1.26.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9e87562b91f68dd8b1c39149d0323b42e0082db7ddb8e934ab4c292094d575d6"}, + {file = "numpy-1.26.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c66d6fec467e8c0f975818c1796d25c53521124b7cfb760114be0abad53a0a2"}, + {file = "numpy-1.26.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f25e2811a9c932e43943a2615e65fc487a0b6b49218899e62e426e7f0a57eeda"}, + {file = "numpy-1.26.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:af36e0aa45e25c9f57bf684b1175e59ea05d9a7d3e8e87b7ae1a1da246f2767e"}, + {file = "numpy-1.26.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:51c7f1b344f302067b02e0f5b5d2daa9ed4a721cf49f070280ac202738ea7f00"}, + {file = "numpy-1.26.3-cp311-cp311-win32.whl", hash = "sha256:7ca4f24341df071877849eb2034948459ce3a07915c2734f1abb4018d9c49d7b"}, + {file = "numpy-1.26.3-cp311-cp311-win_amd64.whl", hash = "sha256:39763aee6dfdd4878032361b30b2b12593fb445ddb66bbac802e2113eb8a6ac4"}, + {file = "numpy-1.26.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a7081fd19a6d573e1a05e600c82a1c421011db7935ed0d5c483e9dd96b99cf13"}, + {file = "numpy-1.26.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:12c70ac274b32bc00c7f61b515126c9205323703abb99cd41836e8125ea0043e"}, + {file = "numpy-1.26.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f784e13e598e9594750b2ef6729bcd5a47f6cfe4a12cca13def35e06d8163e3"}, + {file = "numpy-1.26.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f24750ef94d56ce6e33e4019a8a4d68cfdb1ef661a52cdaee628a56d2437419"}, + {file = "numpy-1.26.3-cp312-cp312-musllinux_1_1_aarch64.whl", 
hash = "sha256:77810ef29e0fb1d289d225cabb9ee6cf4d11978a00bb99f7f8ec2132a84e0166"}, + {file = "numpy-1.26.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8ed07a90f5450d99dad60d3799f9c03c6566709bd53b497eb9ccad9a55867f36"}, + {file = "numpy-1.26.3-cp312-cp312-win32.whl", hash = "sha256:f73497e8c38295aaa4741bdfa4fda1a5aedda5473074369eca10626835445511"}, + {file = "numpy-1.26.3-cp312-cp312-win_amd64.whl", hash = "sha256:da4b0c6c699a0ad73c810736303f7fbae483bcb012e38d7eb06a5e3b432c981b"}, + {file = "numpy-1.26.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1666f634cb3c80ccbd77ec97bc17337718f56d6658acf5d3b906ca03e90ce87f"}, + {file = "numpy-1.26.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:18c3319a7d39b2c6a9e3bb75aab2304ab79a811ac0168a671a62e6346c29b03f"}, + {file = "numpy-1.26.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b7e807d6888da0db6e7e75838444d62495e2b588b99e90dd80c3459594e857b"}, + {file = "numpy-1.26.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4d362e17bcb0011738c2d83e0a65ea8ce627057b2fdda37678f4374a382a137"}, + {file = "numpy-1.26.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b8c275f0ae90069496068c714387b4a0eba5d531aace269559ff2b43655edd58"}, + {file = "numpy-1.26.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cc0743f0302b94f397a4a65a660d4cd24267439eb16493fb3caad2e4389bccbb"}, + {file = "numpy-1.26.3-cp39-cp39-win32.whl", hash = "sha256:9bc6d1a7f8cedd519c4b7b1156d98e051b726bf160715b769106661d567b3f03"}, + {file = "numpy-1.26.3-cp39-cp39-win_amd64.whl", hash = "sha256:867e3644e208c8922a3be26fc6bbf112a035f50f0a86497f98f228c50c607bb2"}, + {file = "numpy-1.26.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3c67423b3703f8fbd90f5adaa37f85b5794d3366948efe9a5190a5f3a83fc34e"}, + {file = "numpy-1.26.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:46f47ee566d98849323f01b349d58f2557f02167ee301e5e28809a8c0e27a2d0"}, + {file = 
"numpy-1.26.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a8474703bffc65ca15853d5fd4d06b18138ae90c17c8d12169968e998e448bb5"}, + {file = "numpy-1.26.3.tar.gz", hash = "sha256:697df43e2b6310ecc9d95f05d5ef20eacc09c7c4ecc9da3f235d39e71b7da1e4"}, ] [[package]] @@ -608,18 +629,18 @@ files = [ [[package]] name = "pydantic" -version = "2.3.0" +version = "2.5.3" description = "Data validation using Python type hints" optional = false python-versions = ">=3.7" files = [ - {file = "pydantic-2.3.0-py3-none-any.whl", hash = "sha256:45b5e446c6dfaad9444819a293b921a40e1db1aa61ea08aede0522529ce90e81"}, - {file = "pydantic-2.3.0.tar.gz", hash = "sha256:1607cc106602284cd4a00882986570472f193fde9cb1259bceeaedb26aa79a6d"}, + {file = "pydantic-2.5.3-py3-none-any.whl", hash = "sha256:d0caf5954bee831b6bfe7e338c32b9e30c85dfe080c843680783ac2b631673b4"}, + {file = "pydantic-2.5.3.tar.gz", hash = "sha256:b3ef57c62535b0941697cce638c08900d87fcb67e29cfa99e8a68f747f393f7a"}, ] [package.dependencies] annotated-types = ">=0.4.0" -pydantic-core = "2.6.3" +pydantic-core = "2.14.6" typing-extensions = ">=4.6.1" [package.extras] @@ -627,117 +648,116 @@ email = ["email-validator (>=2.0.0)"] [[package]] name = "pydantic-core" -version = "2.6.3" +version = "2.14.6" description = "" optional = false python-versions = ">=3.7" files = [ - {file = "pydantic_core-2.6.3-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:1a0ddaa723c48af27d19f27f1c73bdc615c73686d763388c8683fe34ae777bad"}, - {file = "pydantic_core-2.6.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5cfde4fab34dd1e3a3f7f3db38182ab6c95e4ea91cf322242ee0be5c2f7e3d2f"}, - {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5493a7027bfc6b108e17c3383959485087d5942e87eb62bbac69829eae9bc1f7"}, - {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:84e87c16f582f5c753b7f39a71bd6647255512191be2d2dbf49458c4ef024588"}, - {file = 
"pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:522a9c4a4d1924facce7270c84b5134c5cabcb01513213662a2e89cf28c1d309"}, - {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aaafc776e5edc72b3cad1ccedb5fd869cc5c9a591f1213aa9eba31a781be9ac1"}, - {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a750a83b2728299ca12e003d73d1264ad0440f60f4fc9cee54acc489249b728"}, - {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9e8b374ef41ad5c461efb7a140ce4730661aadf85958b5c6a3e9cf4e040ff4bb"}, - {file = "pydantic_core-2.6.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b594b64e8568cf09ee5c9501ede37066b9fc41d83d58f55b9952e32141256acd"}, - {file = "pydantic_core-2.6.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2a20c533cb80466c1d42a43a4521669ccad7cf2967830ac62c2c2f9cece63e7e"}, - {file = "pydantic_core-2.6.3-cp310-none-win32.whl", hash = "sha256:04fe5c0a43dec39aedba0ec9579001061d4653a9b53a1366b113aca4a3c05ca7"}, - {file = "pydantic_core-2.6.3-cp310-none-win_amd64.whl", hash = "sha256:6bf7d610ac8f0065a286002a23bcce241ea8248c71988bda538edcc90e0c39ad"}, - {file = "pydantic_core-2.6.3-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:6bcc1ad776fffe25ea5c187a028991c031a00ff92d012ca1cc4714087e575973"}, - {file = "pydantic_core-2.6.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:df14f6332834444b4a37685810216cc8fe1fe91f447332cd56294c984ecbff1c"}, - {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0b7486d85293f7f0bbc39b34e1d8aa26210b450bbd3d245ec3d732864009819"}, - {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a892b5b1871b301ce20d40b037ffbe33d1407a39639c2b05356acfef5536d26a"}, - {file = 
"pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:883daa467865e5766931e07eb20f3e8152324f0adf52658f4d302242c12e2c32"}, - {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d4eb77df2964b64ba190eee00b2312a1fd7a862af8918ec70fc2d6308f76ac64"}, - {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ce8c84051fa292a5dc54018a40e2a1926fd17980a9422c973e3ebea017aa8da"}, - {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:22134a4453bd59b7d1e895c455fe277af9d9d9fbbcb9dc3f4a97b8693e7e2c9b"}, - {file = "pydantic_core-2.6.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:02e1c385095efbd997311d85c6021d32369675c09bcbfff3b69d84e59dc103f6"}, - {file = "pydantic_core-2.6.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d79f1f2f7ebdb9b741296b69049ff44aedd95976bfee38eb4848820628a99b50"}, - {file = "pydantic_core-2.6.3-cp311-none-win32.whl", hash = "sha256:430ddd965ffd068dd70ef4e4d74f2c489c3a313adc28e829dd7262cc0d2dd1e8"}, - {file = "pydantic_core-2.6.3-cp311-none-win_amd64.whl", hash = "sha256:84f8bb34fe76c68c9d96b77c60cef093f5e660ef8e43a6cbfcd991017d375950"}, - {file = "pydantic_core-2.6.3-cp311-none-win_arm64.whl", hash = "sha256:5a2a3c9ef904dcdadb550eedf3291ec3f229431b0084666e2c2aa8ff99a103a2"}, - {file = "pydantic_core-2.6.3-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:8421cf496e746cf8d6b677502ed9a0d1e4e956586cd8b221e1312e0841c002d5"}, - {file = "pydantic_core-2.6.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bb128c30cf1df0ab78166ded1ecf876620fb9aac84d2413e8ea1594b588c735d"}, - {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37a822f630712817b6ecc09ccc378192ef5ff12e2c9bae97eb5968a6cdf3b862"}, - {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:240a015102a0c0cc8114f1cba6444499a8a4d0333e178bc504a5c2196defd456"}, - {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f90e5e3afb11268628c89f378f7a1ea3f2fe502a28af4192e30a6cdea1e7d5e"}, - {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:340e96c08de1069f3d022a85c2a8c63529fd88709468373b418f4cf2c949fb0e"}, - {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1480fa4682e8202b560dcdc9eeec1005f62a15742b813c88cdc01d44e85308e5"}, - {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f14546403c2a1d11a130b537dda28f07eb6c1805a43dae4617448074fd49c282"}, - {file = "pydantic_core-2.6.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a87c54e72aa2ef30189dc74427421e074ab4561cf2bf314589f6af5b37f45e6d"}, - {file = "pydantic_core-2.6.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f93255b3e4d64785554e544c1c76cd32f4a354fa79e2eeca5d16ac2e7fdd57aa"}, - {file = "pydantic_core-2.6.3-cp312-none-win32.whl", hash = "sha256:f70dc00a91311a1aea124e5f64569ea44c011b58433981313202c46bccbec0e1"}, - {file = "pydantic_core-2.6.3-cp312-none-win_amd64.whl", hash = "sha256:23470a23614c701b37252618e7851e595060a96a23016f9a084f3f92f5ed5881"}, - {file = "pydantic_core-2.6.3-cp312-none-win_arm64.whl", hash = "sha256:1ac1750df1b4339b543531ce793b8fd5c16660a95d13aecaab26b44ce11775e9"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:a53e3195f134bde03620d87a7e2b2f2046e0e5a8195e66d0f244d6d5b2f6d31b"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:f2969e8f72c6236c51f91fbb79c33821d12a811e2a94b7aa59c65f8dbdfad34a"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:672174480a85386dd2e681cadd7d951471ad0bb028ed744c895f11f9d51b9ebe"}, - {file = 
"pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:002d0ea50e17ed982c2d65b480bd975fc41086a5a2f9c924ef8fc54419d1dea3"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3ccc13afee44b9006a73d2046068d4df96dc5b333bf3509d9a06d1b42db6d8bf"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:439a0de139556745ae53f9cc9668c6c2053444af940d3ef3ecad95b079bc9987"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d63b7545d489422d417a0cae6f9898618669608750fc5e62156957e609e728a5"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b44c42edc07a50a081672e25dfe6022554b47f91e793066a7b601ca290f71e42"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1c721bfc575d57305dd922e6a40a8fe3f762905851d694245807a351ad255c58"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:5e4a2cf8c4543f37f5dc881de6c190de08096c53986381daebb56a355be5dfe6"}, - {file = "pydantic_core-2.6.3-cp37-none-win32.whl", hash = "sha256:d9b4916b21931b08096efed090327f8fe78e09ae8f5ad44e07f5c72a7eedb51b"}, - {file = "pydantic_core-2.6.3-cp37-none-win_amd64.whl", hash = "sha256:a8acc9dedd304da161eb071cc7ff1326aa5b66aadec9622b2574ad3ffe225525"}, - {file = "pydantic_core-2.6.3-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:5e9c068f36b9f396399d43bfb6defd4cc99c36215f6ff33ac8b9c14ba15bdf6b"}, - {file = "pydantic_core-2.6.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e61eae9b31799c32c5f9b7be906be3380e699e74b2db26c227c50a5fc7988698"}, - {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d85463560c67fc65cd86153a4975d0b720b6d7725cf7ee0b2d291288433fc21b"}, - {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash 
= "sha256:9616567800bdc83ce136e5847d41008a1d602213d024207b0ff6cab6753fe645"}, - {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9e9b65a55bbabda7fccd3500192a79f6e474d8d36e78d1685496aad5f9dbd92c"}, - {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f468d520f47807d1eb5d27648393519655eadc578d5dd862d06873cce04c4d1b"}, - {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9680dd23055dd874173a3a63a44e7f5a13885a4cfd7e84814be71be24fba83db"}, - {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a718d56c4d55efcfc63f680f207c9f19c8376e5a8a67773535e6f7e80e93170"}, - {file = "pydantic_core-2.6.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8ecbac050856eb6c3046dea655b39216597e373aa8e50e134c0e202f9c47efec"}, - {file = "pydantic_core-2.6.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:788be9844a6e5c4612b74512a76b2153f1877cd845410d756841f6c3420230eb"}, - {file = "pydantic_core-2.6.3-cp38-none-win32.whl", hash = "sha256:07a1aec07333bf5adebd8264047d3dc518563d92aca6f2f5b36f505132399efc"}, - {file = "pydantic_core-2.6.3-cp38-none-win_amd64.whl", hash = "sha256:621afe25cc2b3c4ba05fff53525156d5100eb35c6e5a7cf31d66cc9e1963e378"}, - {file = "pydantic_core-2.6.3-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:813aab5bfb19c98ae370952b6f7190f1e28e565909bfc219a0909db168783465"}, - {file = "pydantic_core-2.6.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:50555ba3cb58f9861b7a48c493636b996a617db1a72c18da4d7f16d7b1b9952b"}, - {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19e20f8baedd7d987bd3f8005c146e6bcbda7cdeefc36fad50c66adb2dd2da48"}, - {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b0a5d7edb76c1c57b95df719af703e796fc8e796447a1da939f97bfa8a918d60"}, - {file = 
"pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f06e21ad0b504658a3a9edd3d8530e8cea5723f6ea5d280e8db8efc625b47e49"}, - {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ea053cefa008fda40f92aab937fb9f183cf8752e41dbc7bc68917884454c6362"}, - {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:171a4718860790f66d6c2eda1d95dd1edf64f864d2e9f9115840840cf5b5713f"}, - {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5ed7ceca6aba5331ece96c0e328cd52f0dcf942b8895a1ed2642de50800b79d3"}, - {file = "pydantic_core-2.6.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:acafc4368b289a9f291e204d2c4c75908557d4f36bd3ae937914d4529bf62a76"}, - {file = "pydantic_core-2.6.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1aa712ba150d5105814e53cb141412217146fedc22621e9acff9236d77d2a5ef"}, - {file = "pydantic_core-2.6.3-cp39-none-win32.whl", hash = "sha256:44b4f937b992394a2e81a5c5ce716f3dcc1237281e81b80c748b2da6dd5cf29a"}, - {file = "pydantic_core-2.6.3-cp39-none-win_amd64.whl", hash = "sha256:9b33bf9658cb29ac1a517c11e865112316d09687d767d7a0e4a63d5c640d1b17"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:d7050899026e708fb185e174c63ebc2c4ee7a0c17b0a96ebc50e1f76a231c057"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:99faba727727b2e59129c59542284efebbddade4f0ae6a29c8b8d3e1f437beb7"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fa159b902d22b283b680ef52b532b29554ea2a7fc39bf354064751369e9dbd7"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:046af9cfb5384f3684eeb3f58a48698ddab8dd870b4b3f67f825353a14441418"}, - {file = 
"pydantic_core-2.6.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:930bfe73e665ebce3f0da2c6d64455098aaa67e1a00323c74dc752627879fc67"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:85cc4d105747d2aa3c5cf3e37dac50141bff779545ba59a095f4a96b0a460e70"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b25afe9d5c4f60dcbbe2b277a79be114e2e65a16598db8abee2a2dcde24f162b"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:e49ce7dc9f925e1fb010fc3d555250139df61fa6e5a0a95ce356329602c11ea9"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-macosx_10_7_x86_64.whl", hash = "sha256:2dd50d6a1aef0426a1d0199190c6c43ec89812b1f409e7fe44cb0fbf6dfa733c"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6595b0d8c8711e8e1dc389d52648b923b809f68ac1c6f0baa525c6440aa0daa"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ef724a059396751aef71e847178d66ad7fc3fc969a1a40c29f5aac1aa5f8784"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3c8945a105f1589ce8a693753b908815e0748f6279959a4530f6742e1994dcb6"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:c8c6660089a25d45333cb9db56bb9e347241a6d7509838dbbd1931d0e19dbc7f"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:692b4ff5c4e828a38716cfa92667661a39886e71136c97b7dac26edef18767f7"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:f1a5d8f18877474c80b7711d870db0eeef9442691fcdb00adabfc97e183ee0b0"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:3796a6152c545339d3b1652183e786df648ecdf7c4f9347e1d30e6750907f5bb"}, - {file = 
"pydantic_core-2.6.3-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:b962700962f6e7a6bd77e5f37320cabac24b4c0f76afeac05e9f93cf0c620014"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56ea80269077003eaa59723bac1d8bacd2cd15ae30456f2890811efc1e3d4413"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75c0ebbebae71ed1e385f7dfd9b74c1cff09fed24a6df43d326dd7f12339ec34"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:252851b38bad3bfda47b104ffd077d4f9604a10cb06fe09d020016a25107bf98"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:6656a0ae383d8cd7cc94e91de4e526407b3726049ce8d7939049cbfa426518c8"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:d9140ded382a5b04a1c030b593ed9bf3088243a0a8b7fa9f071a5736498c5483"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:d38bbcef58220f9c81e42c255ef0bf99735d8f11edef69ab0b499da77105158a"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:c9d469204abcca28926cbc28ce98f28e50e488767b084fb3fbdf21af11d3de26"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:48c1ed8b02ffea4d5c9c220eda27af02b8149fe58526359b3c07eb391cb353a2"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b2b1bfed698fa410ab81982f681f5b1996d3d994ae8073286515ac4d165c2e7"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf9d42a71a4d7a7c1f14f629e5c30eac451a6fc81827d2beefd57d014c006c4a"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4292ca56751aebbe63a84bbfc3b5717abb09b14d4b4442cc43fd7c49a1529efd"}, - {file = 
"pydantic_core-2.6.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:7dc2ce039c7290b4ef64334ec7e6ca6494de6eecc81e21cb4f73b9b39991408c"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:615a31b1629e12445c0e9fc8339b41aaa6cc60bd53bf802d5fe3d2c0cda2ae8d"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:1fa1f6312fb84e8c281f32b39affe81984ccd484da6e9d65b3d18c202c666149"}, - {file = "pydantic_core-2.6.3.tar.gz", hash = "sha256:1508f37ba9e3ddc0189e6ff4e2228bd2d3c3a4641cbe8c07177162f76ed696c7"}, + {file = "pydantic_core-2.14.6-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:72f9a942d739f09cd42fffe5dc759928217649f070056f03c70df14f5770acf9"}, + {file = "pydantic_core-2.14.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6a31d98c0d69776c2576dda4b77b8e0c69ad08e8b539c25c7d0ca0dc19a50d6c"}, + {file = "pydantic_core-2.14.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5aa90562bc079c6c290f0512b21768967f9968e4cfea84ea4ff5af5d917016e4"}, + {file = "pydantic_core-2.14.6-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:370ffecb5316ed23b667d99ce4debe53ea664b99cc37bfa2af47bc769056d534"}, + {file = "pydantic_core-2.14.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f85f3843bdb1fe80e8c206fe6eed7a1caeae897e496542cee499c374a85c6e08"}, + {file = "pydantic_core-2.14.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9862bf828112e19685b76ca499b379338fd4c5c269d897e218b2ae8fcb80139d"}, + {file = "pydantic_core-2.14.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:036137b5ad0cb0004c75b579445a1efccd072387a36c7f217bb8efd1afbe5245"}, + {file = "pydantic_core-2.14.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:92879bce89f91f4b2416eba4429c7b5ca22c45ef4a499c39f0c5c69257522c7c"}, + {file = 
"pydantic_core-2.14.6-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0c08de15d50fa190d577e8591f0329a643eeaed696d7771760295998aca6bc66"}, + {file = "pydantic_core-2.14.6-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:36099c69f6b14fc2c49d7996cbf4f87ec4f0e66d1c74aa05228583225a07b590"}, + {file = "pydantic_core-2.14.6-cp310-none-win32.whl", hash = "sha256:7be719e4d2ae6c314f72844ba9d69e38dff342bc360379f7c8537c48e23034b7"}, + {file = "pydantic_core-2.14.6-cp310-none-win_amd64.whl", hash = "sha256:36fa402dcdc8ea7f1b0ddcf0df4254cc6b2e08f8cd80e7010d4c4ae6e86b2a87"}, + {file = "pydantic_core-2.14.6-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:dea7fcd62915fb150cdc373212141a30037e11b761fbced340e9db3379b892d4"}, + {file = "pydantic_core-2.14.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ffff855100bc066ff2cd3aa4a60bc9534661816b110f0243e59503ec2df38421"}, + {file = "pydantic_core-2.14.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b027c86c66b8627eb90e57aee1f526df77dc6d8b354ec498be9a757d513b92b"}, + {file = "pydantic_core-2.14.6-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:00b1087dabcee0b0ffd104f9f53d7d3eaddfaa314cdd6726143af6bc713aa27e"}, + {file = "pydantic_core-2.14.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:75ec284328b60a4e91010c1acade0c30584f28a1f345bc8f72fe8b9e46ec6a96"}, + {file = "pydantic_core-2.14.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e1f4744eea1501404b20b0ac059ff7e3f96a97d3e3f48ce27a139e053bb370b"}, + {file = "pydantic_core-2.14.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2602177668f89b38b9f84b7b3435d0a72511ddef45dc14446811759b82235a1"}, + {file = "pydantic_core-2.14.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6c8edaea3089bf908dd27da8f5d9e395c5b4dc092dbcce9b65e7156099b4b937"}, + {file = 
"pydantic_core-2.14.6-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:478e9e7b360dfec451daafe286998d4a1eeaecf6d69c427b834ae771cad4b622"}, + {file = "pydantic_core-2.14.6-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b6ca36c12a5120bad343eef193cc0122928c5c7466121da7c20f41160ba00ba2"}, + {file = "pydantic_core-2.14.6-cp311-none-win32.whl", hash = "sha256:2b8719037e570639e6b665a4050add43134d80b687288ba3ade18b22bbb29dd2"}, + {file = "pydantic_core-2.14.6-cp311-none-win_amd64.whl", hash = "sha256:78ee52ecc088c61cce32b2d30a826f929e1708f7b9247dc3b921aec367dc1b23"}, + {file = "pydantic_core-2.14.6-cp311-none-win_arm64.whl", hash = "sha256:a19b794f8fe6569472ff77602437ec4430f9b2b9ec7a1105cfd2232f9ba355e6"}, + {file = "pydantic_core-2.14.6-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:667aa2eac9cd0700af1ddb38b7b1ef246d8cf94c85637cbb03d7757ca4c3fdec"}, + {file = "pydantic_core-2.14.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cdee837710ef6b56ebd20245b83799fce40b265b3b406e51e8ccc5b85b9099b7"}, + {file = "pydantic_core-2.14.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c5bcf3414367e29f83fd66f7de64509a8fd2368b1edf4351e862910727d3e51"}, + {file = "pydantic_core-2.14.6-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:26a92ae76f75d1915806b77cf459811e772d8f71fd1e4339c99750f0e7f6324f"}, + {file = "pydantic_core-2.14.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a983cca5ed1dd9a35e9e42ebf9f278d344603bfcb174ff99a5815f953925140a"}, + {file = "pydantic_core-2.14.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cb92f9061657287eded380d7dc455bbf115430b3aa4741bdc662d02977e7d0af"}, + {file = "pydantic_core-2.14.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4ace1e220b078c8e48e82c081e35002038657e4b37d403ce940fa679e57113b"}, + {file = "pydantic_core-2.14.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:ef633add81832f4b56d3b4c9408b43d530dfca29e68fb1b797dcb861a2c734cd"}, + {file = "pydantic_core-2.14.6-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7e90d6cc4aad2cc1f5e16ed56e46cebf4877c62403a311af20459c15da76fd91"}, + {file = "pydantic_core-2.14.6-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e8a5ac97ea521d7bde7621d86c30e86b798cdecd985723c4ed737a2aa9e77d0c"}, + {file = "pydantic_core-2.14.6-cp312-none-win32.whl", hash = "sha256:f27207e8ca3e5e021e2402ba942e5b4c629718e665c81b8b306f3c8b1ddbb786"}, + {file = "pydantic_core-2.14.6-cp312-none-win_amd64.whl", hash = "sha256:b3e5fe4538001bb82e2295b8d2a39356a84694c97cb73a566dc36328b9f83b40"}, + {file = "pydantic_core-2.14.6-cp312-none-win_arm64.whl", hash = "sha256:64634ccf9d671c6be242a664a33c4acf12882670b09b3f163cd00a24cffbd74e"}, + {file = "pydantic_core-2.14.6-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:24368e31be2c88bd69340fbfe741b405302993242ccb476c5c3ff48aeee1afe0"}, + {file = "pydantic_core-2.14.6-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:e33b0834f1cf779aa839975f9d8755a7c2420510c0fa1e9fa0497de77cd35d2c"}, + {file = "pydantic_core-2.14.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6af4b3f52cc65f8a0bc8b1cd9676f8c21ef3e9132f21fed250f6958bd7223bed"}, + {file = "pydantic_core-2.14.6-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d15687d7d7f40333bd8266f3814c591c2e2cd263fa2116e314f60d82086e353a"}, + {file = "pydantic_core-2.14.6-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:095b707bb287bfd534044166ab767bec70a9bba3175dcdc3371782175c14e43c"}, + {file = "pydantic_core-2.14.6-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94fc0e6621e07d1e91c44e016cc0b189b48db053061cc22d6298a611de8071bb"}, + {file = "pydantic_core-2.14.6-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ce830e480f6774608dedfd4a90c42aac4a7af0a711f1b52f807130c2e434c06"}, + {file = 
"pydantic_core-2.14.6-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a306cdd2ad3a7d795d8e617a58c3a2ed0f76c8496fb7621b6cd514eb1532cae8"}, + {file = "pydantic_core-2.14.6-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:2f5fa187bde8524b1e37ba894db13aadd64faa884657473b03a019f625cee9a8"}, + {file = "pydantic_core-2.14.6-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:438027a975cc213a47c5d70672e0d29776082155cfae540c4e225716586be75e"}, + {file = "pydantic_core-2.14.6-cp37-none-win32.whl", hash = "sha256:f96ae96a060a8072ceff4cfde89d261837b4294a4f28b84a28765470d502ccc6"}, + {file = "pydantic_core-2.14.6-cp37-none-win_amd64.whl", hash = "sha256:e646c0e282e960345314f42f2cea5e0b5f56938c093541ea6dbf11aec2862391"}, + {file = "pydantic_core-2.14.6-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:db453f2da3f59a348f514cfbfeb042393b68720787bbef2b4c6068ea362c8149"}, + {file = "pydantic_core-2.14.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3860c62057acd95cc84044e758e47b18dcd8871a328ebc8ccdefd18b0d26a21b"}, + {file = "pydantic_core-2.14.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:36026d8f99c58d7044413e1b819a67ca0e0b8ebe0f25e775e6c3d1fabb3c38fb"}, + {file = "pydantic_core-2.14.6-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8ed1af8692bd8d2a29d702f1a2e6065416d76897d726e45a1775b1444f5928a7"}, + {file = "pydantic_core-2.14.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:314ccc4264ce7d854941231cf71b592e30d8d368a71e50197c905874feacc8a8"}, + {file = "pydantic_core-2.14.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:982487f8931067a32e72d40ab6b47b1628a9c5d344be7f1a4e668fb462d2da42"}, + {file = "pydantic_core-2.14.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2dbe357bc4ddda078f79d2a36fc1dd0494a7f2fad83a0a684465b6f24b46fe80"}, + {file = "pydantic_core-2.14.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", 
hash = "sha256:2f6ffc6701a0eb28648c845f4945a194dc7ab3c651f535b81793251e1185ac3d"}, + {file = "pydantic_core-2.14.6-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:7f5025db12fc6de7bc1104d826d5aee1d172f9ba6ca936bf6474c2148ac336c1"}, + {file = "pydantic_core-2.14.6-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:dab03ed811ed1c71d700ed08bde8431cf429bbe59e423394f0f4055f1ca0ea60"}, + {file = "pydantic_core-2.14.6-cp38-none-win32.whl", hash = "sha256:dfcbebdb3c4b6f739a91769aea5ed615023f3c88cb70df812849aef634c25fbe"}, + {file = "pydantic_core-2.14.6-cp38-none-win_amd64.whl", hash = "sha256:99b14dbea2fdb563d8b5a57c9badfcd72083f6006caf8e126b491519c7d64ca8"}, + {file = "pydantic_core-2.14.6-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:4ce8299b481bcb68e5c82002b96e411796b844d72b3e92a3fbedfe8e19813eab"}, + {file = "pydantic_core-2.14.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b9a9d92f10772d2a181b5ca339dee066ab7d1c9a34ae2421b2a52556e719756f"}, + {file = "pydantic_core-2.14.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd9e98b408384989ea4ab60206b8e100d8687da18b5c813c11e92fd8212a98e0"}, + {file = "pydantic_core-2.14.6-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4f86f1f318e56f5cbb282fe61eb84767aee743ebe32c7c0834690ebea50c0a6b"}, + {file = "pydantic_core-2.14.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86ce5fcfc3accf3a07a729779d0b86c5d0309a4764c897d86c11089be61da160"}, + {file = "pydantic_core-2.14.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dcf1978be02153c6a31692d4fbcc2a3f1db9da36039ead23173bc256ee3b91b"}, + {file = "pydantic_core-2.14.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eedf97be7bc3dbc8addcef4142f4b4164066df0c6f36397ae4aaed3eb187d8ab"}, + {file = "pydantic_core-2.14.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:d5f916acf8afbcab6bacbb376ba7dc61f845367901ecd5e328fc4d4aef2fcab0"}, + {file = "pydantic_core-2.14.6-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:8a14c192c1d724c3acbfb3f10a958c55a2638391319ce8078cb36c02283959b9"}, + {file = "pydantic_core-2.14.6-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0348b1dc6b76041516e8a854ff95b21c55f5a411c3297d2ca52f5528e49d8411"}, + {file = "pydantic_core-2.14.6-cp39-none-win32.whl", hash = "sha256:de2a0645a923ba57c5527497daf8ec5df69c6eadf869e9cd46e86349146e5975"}, + {file = "pydantic_core-2.14.6-cp39-none-win_amd64.whl", hash = "sha256:aca48506a9c20f68ee61c87f2008f81f8ee99f8d7f0104bff3c47e2d148f89d9"}, + {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:d5c28525c19f5bb1e09511669bb57353d22b94cf8b65f3a8d141c389a55dec95"}, + {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:78d0768ee59baa3de0f4adac9e3748b4b1fffc52143caebddfd5ea2961595277"}, + {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b93785eadaef932e4fe9c6e12ba67beb1b3f1e5495631419c784ab87e975670"}, + {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a874f21f87c485310944b2b2734cd6d318765bcbb7515eead33af9641816506e"}, + {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b89f4477d915ea43b4ceea6756f63f0288941b6443a2b28c69004fe07fde0d0d"}, + {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:172de779e2a153d36ee690dbc49c6db568d7b33b18dc56b69a7514aecbcf380d"}, + {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:dfcebb950aa7e667ec226a442722134539e77c575f6cfaa423f24371bb8d2e94"}, + {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:55a23dcd98c858c0db44fc5c04fc7ed81c4b4d33c653a7c45ddaebf6563a2f66"}, + {file 
= "pydantic_core-2.14.6-pp37-pypy37_pp73-macosx_10_7_x86_64.whl", hash = "sha256:4241204e4b36ab5ae466ecec5c4c16527a054c69f99bba20f6f75232a6a534e2"}, + {file = "pydantic_core-2.14.6-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e574de99d735b3fc8364cba9912c2bec2da78775eba95cbb225ef7dda6acea24"}, + {file = "pydantic_core-2.14.6-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1302a54f87b5cd8528e4d6d1bf2133b6aa7c6122ff8e9dc5220fbc1e07bffebd"}, + {file = "pydantic_core-2.14.6-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f8e81e4b55930e5ffab4a68db1af431629cf2e4066dbdbfef65348b8ab804ea8"}, + {file = "pydantic_core-2.14.6-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:c99462ffc538717b3e60151dfaf91125f637e801f5ab008f81c402f1dff0cd0f"}, + {file = "pydantic_core-2.14.6-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e4cf2d5829f6963a5483ec01578ee76d329eb5caf330ecd05b3edd697e7d768a"}, + {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:cf10b7d58ae4a1f07fccbf4a0a956d705356fea05fb4c70608bb6fa81d103cda"}, + {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:399ac0891c284fa8eb998bcfa323f2234858f5d2efca3950ae58c8f88830f145"}, + {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c6a5c79b28003543db3ba67d1df336f253a87d3112dac3a51b94f7d48e4c0e1"}, + {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:599c87d79cab2a6a2a9df4aefe0455e61e7d2aeede2f8577c1b7c0aec643ee8e"}, + {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43e166ad47ba900f2542a80d83f9fc65fe99eb63ceec4debec160ae729824052"}, + {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = 
"sha256:3a0b5db001b98e1c649dd55afa928e75aa4087e587b9524a4992316fa23c9fba"}, + {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:747265448cb57a9f37572a488a57d873fd96bf51e5bb7edb52cfb37124516da4"}, + {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:7ebe3416785f65c28f4f9441e916bfc8a54179c8dea73c23023f7086fa601c5d"}, + {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:86c963186ca5e50d5c8287b1d1c9d3f8f024cbe343d048c5bd282aec2d8641f2"}, + {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:e0641b506486f0b4cd1500a2a65740243e8670a2549bb02bc4556a83af84ae03"}, + {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71d72ca5eaaa8d38c8df16b7deb1a2da4f650c41b58bb142f3fb75d5ad4a611f"}, + {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27e524624eace5c59af499cd97dc18bb201dc6a7a2da24bfc66ef151c69a5f2a"}, + {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a3dde6cac75e0b0902778978d3b1646ca9f438654395a362cb21d9ad34b24acf"}, + {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:00646784f6cd993b1e1c0e7b0fdcbccc375d539db95555477771c27555e3c556"}, + {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:23598acb8ccaa3d1d875ef3b35cb6376535095e9405d91a3d57a8c7db5d29341"}, + {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7f41533d7e3cf9520065f610b41ac1c76bc2161415955fbcead4981b22c7611e"}, + {file = "pydantic_core-2.14.6.tar.gz", hash = "sha256:1fd0c1d395372843fba13a51c28e3bb9d59bd7aebfeb17358ffaaa1e4dbbe948"}, ] [package.dependencies] @@ -797,13 +817,13 @@ windows-terminal = ["colorama (>=0.4.6)"] [[package]] name = "pytest" -version = "7.4.3" +version = "7.4.4" description = 
"pytest: simple powerful testing with Python" optional = false python-versions = ">=3.7" files = [ - {file = "pytest-7.4.3-py3-none-any.whl", hash = "sha256:0d009c083ea859a71b76adf7c1d502e4bc170b80a8ef002da5806527b9591fac"}, - {file = "pytest-7.4.3.tar.gz", hash = "sha256:d989d136982de4e3b29dabcc838ad581c64e8ed52c11fbe86ddebd9da0818cd5"}, + {file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"}, + {file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"}, ] [package.dependencies] @@ -1049,21 +1069,21 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "rich" -version = "12.6.0" +version = "13.7.0" description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" optional = false -python-versions = ">=3.6.3,<4.0.0" +python-versions = ">=3.7.0" files = [ - {file = "rich-12.6.0-py3-none-any.whl", hash = "sha256:a4eb26484f2c82589bd9a17c73d32a010b1e29d89f1604cd9bf3a2097b81bb5e"}, - {file = "rich-12.6.0.tar.gz", hash = "sha256:ba3a3775974105c221d31141f2c116f4fd65c5ceb0698657a11e9f295ec93fd0"}, + {file = "rich-13.7.0-py3-none-any.whl", hash = "sha256:6da14c108c4866ee9520bbffa71f6fe3962e193b7da68720583850cd4548e235"}, + {file = "rich-13.7.0.tar.gz", hash = "sha256:5cb5123b5cf9ee70584244246816e9114227e0b98ad9176eede6ad54bf5403fa"}, ] [package.dependencies] -commonmark = ">=0.9.0,<0.10.0" -pygments = ">=2.6.0,<3.0.0" +markdown-it-py = ">=2.2.0" +pygments = ">=2.13.0,<3.0.0" [package.extras] -jupyter = ["ipywidgets (>=7.5.1,<8.0.0)"] +jupyter = ["ipywidgets (>=7.5.1,<9)"] [[package]] name = "ruamel-yaml" @@ -1144,28 +1164,28 @@ files = [ [[package]] name = "ruff" -version = "0.1.7" +version = "0.1.11" description = "An extremely fast Python linter and code formatter, written in Rust." 
optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.1.7-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:7f80496854fdc65b6659c271d2c26e90d4d401e6a4a31908e7e334fab4645aac"}, - {file = "ruff-0.1.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:1ea109bdb23c2a4413f397ebd8ac32cb498bee234d4191ae1a310af760e5d287"}, - {file = "ruff-0.1.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b0c2de9dd9daf5e07624c24add25c3a490dbf74b0e9bca4145c632457b3b42a"}, - {file = "ruff-0.1.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:69a4bed13bc1d5dabf3902522b5a2aadfebe28226c6269694283c3b0cecb45fd"}, - {file = "ruff-0.1.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:de02ca331f2143195a712983a57137c5ec0f10acc4aa81f7c1f86519e52b92a1"}, - {file = "ruff-0.1.7-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:45b38c3f8788a65e6a2cab02e0f7adfa88872696839d9882c13b7e2f35d64c5f"}, - {file = "ruff-0.1.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6c64cb67b2025b1ac6d58e5ffca8f7b3f7fd921f35e78198411237e4f0db8e73"}, - {file = "ruff-0.1.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9dcc6bb2f4df59cb5b4b40ff14be7d57012179d69c6565c1da0d1f013d29951b"}, - {file = "ruff-0.1.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df2bb4bb6bbe921f6b4f5b6fdd8d8468c940731cb9406f274ae8c5ed7a78c478"}, - {file = "ruff-0.1.7-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:276a89bcb149b3d8c1b11d91aa81898fe698900ed553a08129b38d9d6570e717"}, - {file = "ruff-0.1.7-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:90c958fe950735041f1c80d21b42184f1072cc3975d05e736e8d66fc377119ea"}, - {file = "ruff-0.1.7-py3-none-musllinux_1_2_i686.whl", hash = "sha256:6b05e3b123f93bb4146a761b7a7d57af8cb7384ccb2502d29d736eaade0db519"}, - {file = "ruff-0.1.7-py3-none-musllinux_1_2_x86_64.whl", hash = 
"sha256:290ecab680dce94affebefe0bbca2322a6277e83d4f29234627e0f8f6b4fa9ce"}, - {file = "ruff-0.1.7-py3-none-win32.whl", hash = "sha256:416dfd0bd45d1a2baa3b1b07b1b9758e7d993c256d3e51dc6e03a5e7901c7d80"}, - {file = "ruff-0.1.7-py3-none-win_amd64.whl", hash = "sha256:4af95fd1d3b001fc41325064336db36e3d27d2004cdb6d21fd617d45a172dd96"}, - {file = "ruff-0.1.7-py3-none-win_arm64.whl", hash = "sha256:0683b7bfbb95e6df3c7c04fe9d78f631f8e8ba4868dfc932d43d690698057e2e"}, - {file = "ruff-0.1.7.tar.gz", hash = "sha256:dffd699d07abf54833e5f6cc50b85a6ff043715da8788c4a79bcd4ab4734d306"}, + {file = "ruff-0.1.11-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:a7f772696b4cdc0a3b2e527fc3c7ccc41cdcb98f5c80fdd4f2b8c50eb1458196"}, + {file = "ruff-0.1.11-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:934832f6ed9b34a7d5feea58972635c2039c7a3b434fe5ba2ce015064cb6e955"}, + {file = "ruff-0.1.11-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea0d3e950e394c4b332bcdd112aa566010a9f9c95814844a7468325290aabfd9"}, + {file = "ruff-0.1.11-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9bd4025b9c5b429a48280785a2b71d479798a69f5c2919e7d274c5f4b32c3607"}, + {file = "ruff-0.1.11-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e1ad00662305dcb1e987f5ec214d31f7d6a062cae3e74c1cbccef15afd96611d"}, + {file = "ruff-0.1.11-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:4b077ce83f47dd6bea1991af08b140e8b8339f0ba8cb9b7a484c30ebab18a23f"}, + {file = "ruff-0.1.11-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4a88efecec23c37b11076fe676e15c6cdb1271a38f2b415e381e87fe4517f18"}, + {file = "ruff-0.1.11-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5b25093dad3b055667730a9b491129c42d45e11cdb7043b702e97125bcec48a1"}, + {file = "ruff-0.1.11-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:231d8fb11b2cc7c0366a326a66dafc6ad449d7fcdbc268497ee47e1334f66f77"}, + {file = "ruff-0.1.11-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:09c415716884950080921dd6237767e52e227e397e2008e2bed410117679975b"}, + {file = "ruff-0.1.11-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:0f58948c6d212a6b8d41cd59e349751018797ce1727f961c2fa755ad6208ba45"}, + {file = "ruff-0.1.11-py3-none-musllinux_1_2_i686.whl", hash = "sha256:190a566c8f766c37074d99640cd9ca3da11d8deae2deae7c9505e68a4a30f740"}, + {file = "ruff-0.1.11-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:6464289bd67b2344d2a5d9158d5eb81025258f169e69a46b741b396ffb0cda95"}, + {file = "ruff-0.1.11-py3-none-win32.whl", hash = "sha256:9b8f397902f92bc2e70fb6bebfa2139008dc72ae5177e66c383fa5426cb0bf2c"}, + {file = "ruff-0.1.11-py3-none-win_amd64.whl", hash = "sha256:eb85ee287b11f901037a6683b2374bb0ec82928c5cbc984f575d0437979c521a"}, + {file = "ruff-0.1.11-py3-none-win_arm64.whl", hash = "sha256:97ce4d752f964ba559c7023a86e5f8e97f026d511e48013987623915431c7ea9"}, + {file = "ruff-0.1.11.tar.gz", hash = "sha256:f9d4d88cb6eeb4dfe20f9f0519bd2eaba8119bde87c3d5065c541dbae2b5a2cb"}, ] [[package]] @@ -1193,13 +1213,13 @@ gitlab = ["python-gitlab (>=1.3.0)"] [[package]] name = "setuptools" -version = "69.0.2" +version = "69.0.3" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "setuptools-69.0.2-py3-none-any.whl", hash = "sha256:1e8fdff6797d3865f37397be788a4e3cba233608e9b509382a2777d25ebde7f2"}, - {file = "setuptools-69.0.2.tar.gz", hash = "sha256:735896e78a4742605974de002ac60562d286fa8051a7e2299445e8e8fbb01aa6"}, + {file = "setuptools-69.0.3-py3-none-any.whl", hash = "sha256:385eb4edd9c9d5c17540511303e39a147ce2fc04bc55289c322b9e5904fe2c05"}, + {file = "setuptools-69.0.3.tar.gz", hash = "sha256:be1af57fc409f93647f2e8e4573a142ed38724b8cdd389706a867bb4efcf1e78"}, ] [package.extras] @@ -1281,26 +1301,27 @@ files = [ 
[[package]] name = "typer" -version = "0.7.0" +version = "0.9.0" description = "Typer, build great CLIs. Easy to code. Based on Python type hints." optional = false python-versions = ">=3.6" files = [ - {file = "typer-0.7.0-py3-none-any.whl", hash = "sha256:b5e704f4e48ec263de1c0b3a2387cd405a13767d2f907f44c1a08cbad96f606d"}, - {file = "typer-0.7.0.tar.gz", hash = "sha256:ff797846578a9f2a201b53442aedeb543319466870fbe1c701eab66dd7681165"}, + {file = "typer-0.9.0-py3-none-any.whl", hash = "sha256:5d96d986a21493606a358cae4461bd8cdf83cbf33a5aa950ae629ca3b51467ee"}, + {file = "typer-0.9.0.tar.gz", hash = "sha256:50922fd79aea2f4751a8e0408ff10d2662bd0c8bbfa84755a699f3bada2978b2"}, ] [package.dependencies] click = ">=7.1.1,<9.0.0" colorama = {version = ">=0.4.3,<0.5.0", optional = true, markers = "extra == \"all\""} -rich = {version = ">=10.11.0,<13.0.0", optional = true, markers = "extra == \"all\""} +rich = {version = ">=10.11.0,<14.0.0", optional = true, markers = "extra == \"all\""} shellingham = {version = ">=1.3.0,<2.0.0", optional = true, markers = "extra == \"all\""} +typing-extensions = ">=3.7.4.3" [package.extras] -all = ["colorama (>=0.4.3,<0.5.0)", "rich (>=10.11.0,<13.0.0)", "shellingham (>=1.3.0,<2.0.0)"] +all = ["colorama (>=0.4.3,<0.5.0)", "rich (>=10.11.0,<14.0.0)", "shellingham (>=1.3.0,<2.0.0)"] dev = ["autoflake (>=1.3.1,<2.0.0)", "flake8 (>=3.8.3,<4.0.0)", "pre-commit (>=2.17.0,<3.0.0)"] doc = ["cairosvg (>=2.5.2,<3.0.0)", "mdx-include (>=1.4.1,<2.0.0)", "mkdocs (>=1.1.2,<2.0.0)", "mkdocs-material (>=8.1.4,<9.0.0)", "pillow (>=9.3.0,<10.0.0)"] -test = ["black (>=22.3.0,<23.0.0)", "coverage (>=6.2,<7.0)", "isort (>=5.0.6,<6.0.0)", "mypy (==0.910)", "pytest (>=4.4.0,<8.0.0)", "pytest-cov (>=2.10.0,<5.0.0)", "pytest-sugar (>=0.9.4,<0.10.0)", "pytest-xdist (>=1.32.0,<4.0.0)", "rich (>=10.11.0,<13.0.0)", "shellingham (>=1.3.0,<2.0.0)"] +test = ["black (>=22.3.0,<23.0.0)", "coverage (>=6.2,<7.0)", "isort (>=5.0.6,<6.0.0)", "mypy (==0.910)", "pytest 
(>=4.4.0,<8.0.0)", "pytest-cov (>=2.10.0,<5.0.0)", "pytest-sugar (>=0.9.4,<0.10.0)", "pytest-xdist (>=1.32.0,<4.0.0)", "rich (>=10.11.0,<14.0.0)", "shellingham (>=1.3.0,<2.0.0)"] [[package]] name = "typing-extensions" @@ -1352,4 +1373,4 @@ test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "269af20479971fc8a9dac3c187aacebf049cf687e91b4806a1b27c48e48fda8d" +content-hash = "1389751890bf2c1c64f410f1a3fc7b1d05c692c839b535e6220f74333ad18229" diff --git a/stac_model/pyproject.toml b/stac_model/pyproject.toml index f73725b..3598cbd 100644 --- a/stac_model/pyproject.toml +++ b/stac_model/pyproject.toml @@ -44,9 +44,9 @@ classifiers = [ # UPDATEME with additional classifiers; remove last classifier [tool.poetry.dependencies] python = "^3.10" -typer = {extras = ["all"], version = "^0.7.0"} -rich = "^12.6.0" -pydantic = "~2.3.0" # bug in post 2.3 https://github.com/pydantic/pydantic/issues/7720 +typer = {extras = ["all"], version = "^0.9.0"} +rich = "^13.7.0" +pydantic = "^2.5.0" # bug in post 2.3 https://github.com/pydantic/pydantic/issues/7720 pydantic-core = "~2" numpy = "^1.26.2" diff --git a/stac_model/stac_model/__main__.py b/stac_model/stac_model/__main__.py index 87fc704..926882c 100644 --- a/stac_model/stac_model/__main__.py +++ b/stac_model/stac_model/__main__.py @@ -34,7 +34,7 @@ def main( """Generate example spec.""" input_array = InputArray( - dtype="float32", shape=[-1, 13, 64, 64], dim_ordering="bchw" + dtype="float32", shape=[-1, 13, 64, 64], dim_order="bchw" ) band_list = [] bands = [Band(name=b, description = f"Band {b}", nodata=-9999, data_type="float32", unit="reflectance") for b in band_list] @@ -44,12 +44,11 @@ def main( stddev= [245.71762908, 333.00778264, 395.09249139, 593.75055589, 566.4170017, 861.18399006, 1086.63139075, 1117.98170791, 404.91978886, 4.77584468, 1002.58768311, 761.30323499, 1231.58581042]) - mlm_input = ModelInput(name= "13 Band 
Sentinel-2 Batch", bands=bands, input_array=input_array, norm_by_channel=True, norm_type="z_score", rescale_type="None", statistics=stats, pre_processing_function = "https://github.com/microsoft/torchgeo/blob/545abe8326efc2848feae69d0212a15faba3eb00/torchgeo/datamodules/eurosat.py") - mlm_architecture = Architecture(name = "ResNet-18", total_parameters= 11_700_000, model_type= "torch", summary= "Sourced from torchgeo python library, identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO", pretrained= True) - mlm_runtime = Runtime(framework= "torch", version= "2.1.2+cu121", model_asset= ModelAsset(href= "https://huggingface.co/torchgeo/resnet18_sentinel2_all_moco/resolve/main/resnet18_sentinel2_all_moco-59bfdff9.pth"), - model_handler= "torchgeo.models.resnet.ResNet18", model_src_url= "https://github.com/huggingface/pytorch-image-models/blob/b5a4fa9c3be6ac732807db7e87d176f5e4fc06f1/timm/models/resnet.py#L362") - class_map = ClassMap( - class_to_label_id={ + mlm_input = Input(name= "13 Band Sentinel-2 Batch", bands=bands, input_array=input_array, norm_by_channel=True, norm_type="z_score", rescale_type="none", statistics=stats, pre_processing_function = "https://github.com/microsoft/torchgeo/blob/545abe8326efc2848feae69d0212a15faba3eb00/torchgeo/datamodules/eurosat.py") + mlm_architecture = Architecture(name = "ResNet-18", total_parameters= 11_700_000, summary= "Sourced from torchgeo python library, identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO", pretrained= True) + mlm_runtime = Runtime(framework= "torch", version= "2.1.2+cu121", asset= Asset(href= "https://huggingface.co/torchgeo/resnet18_sentinel2_all_moco/resolve/main/resnet18_sentinel2_all_moco-59bfdff9.pth"), + handler= "torchgeo.models.resnet.ResNet18", source_code_url= "https://github.com/huggingface/pytorch-image-models/blob/b5a4fa9c3be6ac732807db7e87d176f5e4fc06f1/timm/models/resnet.py#L362") + mlm_output = Output(task= "classification", number_of_classes= 10, output_shape=[-1, 10], class_name_mapping= { 
"Annual Crop": 0, "Forest": 1, "Herbaceous Vegetation": 2, @@ -60,13 +59,11 @@ def main( "Residential Buildings": 7, "River": 8, "SeaLake": 9, - } - ) - mlm_output = ModelOutput(task= "classification", number_of_classes= 10, output_shape=[-1, 10], class_name_mapping= class_map.class_to_label_id) - ml_model_meta = MLModel(mlm_input, mlm_architecture, mlm_runtime, mlm_output) - json_str = ml_model_meta.model_dump_json(indent=2) + }) + ml_model_meta = MLModel(mlm_input=mlm_input, mlm_architecture=mlm_architecture, mlm_runtime=mlm_runtime, mlm_output=mlm_output) + json_str = ml_model_meta.model_dump_json(indent=2, exclude_none=True) with open("example.json", "w") as file: file.write(json_str) - print(ml_model_meta.model_dump_yaml(indent=2) + print(ml_model_meta.model_dump_json(indent=2, exclude_none=True)) if __name__ == "__main__": app() diff --git a/stac_model/stac_model/input.py b/stac_model/stac_model/input.py index 71f998c..5f74896 100644 --- a/stac_model/stac_model/input.py +++ b/stac_model/stac_model/input.py @@ -7,24 +7,24 @@ class InputArray(BaseModel): shape: List[Union[int,float]] dim_order: Literal["bhw", "bchw", "bthw", "btchw"] - dtype: str = Field(..., regex="^(uint8|uint16|int16|int32|float16|float32|float64)$") + dtype: str = Field(..., pattern="^(uint8|uint16|int16|int32|float16|float32|float64)$") class Statistics(BaseModel): - minimum: Optional[List[Union[float, int]]] - maximum: Optional[List[Union[float, int]]] - mean: Optional[List[float]] - stddev: Optional[List[float]] - count: Optional[List[int]] - valid_percent: Optional[List[float]] + minimum: Optional[List[Union[float, int]]] = None + maximum: Optional[List[Union[float, int]]] = None + mean: Optional[List[float]] = None + stddev: Optional[List[float]] = None + count: Optional[List[int]] = None + valid_percent: Optional[List[float]] = None class Band(BaseModel): name: str description: str nodata: float | int | str data_type: str - unit: Optional[str] + unit: Optional[str] = None -class 
ModelInput(BaseModel): +class Input(BaseModel): name: str bands: List[Band] input_array: InputArray @@ -34,6 +34,6 @@ class ModelInput(BaseModel): params: Optional[ Dict[str, int | float | str] ] = None - scaling_factor: Optional[float] - statistics: Optional[Statistics] - pre_processing_function: Optional[str | AnyUrl] + scaling_factor: Optional[float] = None + statistics: Optional[Statistics] = None + pre_processing_function: Optional[str | AnyUrl] = None diff --git a/stac_model/stac_model/output.py b/stac_model/stac_model/output.py index 00c0d82..af67bef 100644 --- a/stac_model/stac_model/output.py +++ b/stac_model/stac_model/output.py @@ -15,16 +15,15 @@ class TaskEnum(str, Enum): class ClassMap(BaseModel): class_to_label_id: Dict[str, int] - # Property to reverse the mapping @property def label_id_to_class(self) -> Dict[int, str]: # Reverse the mapping return {v: k for k, v in self.class_to_label_id.items()} -class ModelOutput(BaseModel): +class Output(BaseModel): task: TaskEnum number_of_classes: int output_shape: List[Union[int,float]] - class_name_mapping: Optional[ClassMap.class_to_label_id] - post_processing_function: Optional[str] + class_name_mapping: Optional[Dict[str, int]] = None + post_processing_function: Optional[str] = None diff --git a/stac_model/stac_model/paths.py b/stac_model/stac_model/paths.py index 30a5fa8..6786536 100644 --- a/stac_model/stac_model/paths.py +++ b/stac_model/stac_model/paths.py @@ -1,3 +1,7 @@ +from pydantic import ( + field_validator, + AnyUrl +) class S3Path(AnyUrl): allowed_schemes = {"s3"} user_required = False diff --git a/stac_model/stac_model/runtime.py b/stac_model/stac_model/runtime.py index f19c0e6..e11249d 100644 --- a/stac_model/stac_model/runtime.py +++ b/stac_model/stac_model/runtime.py @@ -1,16 +1,16 @@ from .paths import S3Path -from pydantic import BaseModel, Field, FilePath, AnyUrl, field_validator +from pydantic import BaseModel, FilePath, AnyUrl, field_validator from typing import Optional, List -class 
ModelAsset(BaseModel): +class Asset(BaseModel): """Information about the model location and other additional file locations. Follows the Asset Object spec: https://github.com/radiantearth/stac-spec/blob/master/collection-spec/collection-spec.md#asset-object """ - href: S3Path | FilePath | str = Field(...) - title: Optional[str] = Field(None) - description: Optional[str] = Field(None) - type: Optional[str] = Field(None) - roles: Optional[List[str]] = Field(None) + href: S3Path | FilePath | str + title: Optional[str] = None + description: Optional[str] = None + type: Optional[str] = None + roles: Optional[List[str]] = None class Config: @@ -41,10 +41,10 @@ class ContainerInfo(BaseModel): class Runtime(BaseModel): framework: str version: str - model_asset: ModelAsset - model_handler: str - model_src_url: str - model_commit_hash: str - container: List[ContainerInfo] - batch_size_suggestion: int - hardware_suggestion: str | AnyUrl + asset: Asset + source_code_url: str + handler: Optional[str] = None + commit_hash: Optional[str] = None + container: Optional[ContainerInfo] = None + batch_size_suggestion: Optional[int] = None + hardware_suggestion: Optional[str | AnyUrl] = None diff --git a/stac_model/stac_model/schema.py b/stac_model/stac_model/schema.py index 000af71..3017921 100644 --- a/stac_model/stac_model/schema.py +++ b/stac_model/stac_model/schema.py @@ -1,22 +1,21 @@ from pydantic import BaseModel from typing import Optional -from .input import ModelInput, InputArray, Band, Statistics -from .output import ModelOutput, ClassMap -from .runtime import Runtime, ModelAsset +from .input import Input, InputArray, Band, Statistics +from .output import Output, ClassMap +from .runtime import Runtime, Asset, ContainerInfo class Architecture(BaseModel): name: str - model_type: str summary: str pretrained: bool - total_parameters: Optional[int] - on_disk_size_mb: Optional[float] - ram_size_mb: Optional[float] + total_parameters: Optional[int] = None + on_disk_size_mb: 
Optional[float] = None + ram_size_mb: Optional[float] = None class MLModel(BaseModel): - mlm_input: ModelInput + mlm_input: Input mlm_architecture: Architecture mlm_runtime: Runtime - mlm_output: ModelOutput + mlm_output: Output -__all__ = ["MLModel", "ModelInput", "InputArray", "Band", "Statistics", "ModelOutput", "ModelAsset", "ClassMap", "Runtime", "ContainerInfo", "Model Asset", "Architecture"] +__all__ = ["MLModel", "Input", "InputArray", "Band", "Statistics", "Output", "Asset", "ClassMap", "Runtime", "ContainerInfo", "Asset", "Architecture"] From cdc6cbaefc23aa4a3bbe93172b870f531f87d8dc Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Fri, 5 Jan 2024 17:36:11 -0800 Subject: [PATCH 009/112] README updates --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index 06ca73c..e0ba826 100644 --- a/README.md +++ b/README.md @@ -83,7 +83,6 @@ A deviation is that we do not include the [Statistics](https://github.com/radian | Field Name | Type | Description | |-------------------------|---------|-------------------------------------------------------------| | name | string | The name of the model architecture. For example, "ResNet-18" or "Random Forest" | -| model_type | string | Type of network (ex: ResNet-18). | | summary | string | Summary of the layers, can be the output of `print(model)`. | | pretrained | string | Indicates the source of the pretraining (ex: ImageNet). | | total_parameters | integer | Total number of parameters. | @@ -98,7 +97,7 @@ A deviation is that we do not include the [Statistics](https://github.com/radian | version | string | Framework version (some models require a specific version of the framework). | | model_asset | [Asset Object](https://github.com/radiantearth/stac-spec/blob/master/collection-spec/collection-spec.md#asset-object) | Common Metadata Collection level asset object containing URI to the model file. | | model_handler | string | Inference execution function. 
| -| model_src_url | string | Url of the source code (ex: GitHub repo). | +| source_code_url | string | Url of the source code (ex: GitHub repo). | | model_commit_hash | string | Hash value pointing to a specific version of the code. | | docker | [Container](#container) | Information for the deployment of the model in a docker instance. | | batch_size_suggestion | number | A suggested batch size for a given compute instance type | From f746d6c35f1a04e6cda1cef4541104369d41b5e4 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Fri, 5 Jan 2024 17:39:25 -0800 Subject: [PATCH 010/112] add to CHANGELOG --- CHANGELOG.md | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 664fd18..1cacce5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,19 +12,17 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - batch_size and hardware suggestion - ram_size_mb to specify model ram requirements during inference - added time to the Tensor object as an optional dim +- Use common metadata Asset Object to refer to Model Artifact and artifact metadata as a Collection level object ### Changed - selected_bands > band_names, the same human readable names used in the common metadata band objects. - replaced normalization:mean, etc. with statistics from STAC 1.1 common metadata -- added pydantic models for internal schema objects +- added pydantic models for internal schema objects in stac_model package and published to PYPI [raster-band-object]: https://github.com/stac-extensions/raster/#raster-band-object ### Deprecated -- Specifying `class_name_mapping` by array is deprecated. - Direct mapping as an object of index to class name should be used. - For backward compatibility, mapping as array and using nested objects with `index` and `class_name` properties - is still permitted, although overly verbose compared to the direct mapping. 
+- ### Removed - Data Object, replaced with common metadata band object which also records data_type and nodata type From c68dd720ad2f38f2bc570b9935354e505023afe9 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Tue, 9 Jan 2024 12:05:58 -0800 Subject: [PATCH 011/112] address comments --- CHANGELOG.md | 4 +- README.md | 139 ++++++++++++++++++++------------------ stac_model/README.md | 110 +++++++++++++++++++----------- stac_model/pyproject.toml | 1 + 4 files changed, 146 insertions(+), 108 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1cacce5..6a5eddd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,13 +19,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - replaced normalization:mean, etc. with statistics from STAC 1.1 common metadata - added pydantic models for internal schema objects in stac_model package and published to PYPI -[raster-band-object]: https://github.com/stac-extensions/raster/#raster-band-object - ### Deprecated - ### Removed -- Data Object, replaced with common metadata band object which also records data_type and nodata type +- Data Object, replaced with [common metadata band object](https://github.com/radiantearth/stac-spec/blob/f9b3c59ba810541c9da70c5f8d39635f8cba7bcd/item-spec/common-metadata.md#bands) which also records data_type and nodata type # TODO link release here diff --git a/README.md b/README.md index e0ba826..19832b9 100644 --- a/README.md +++ b/README.md @@ -1,26 +1,28 @@ -# ML Model Extension Specification +# Machine Learning Model Extension Specification [![hackmd-github-sync-badge](https://hackmd.io/3fB1lrHSTcSHQS57UhVk-Q/badge)](https://hackmd.io/3fB1lrHSTcSHQS57UhVk-Q) -- **Title:** Model Extension -- **Identifier:** [https://schemas.stacspec.org/v1.0.0-beta.3/extensions/ml-model/json-schema/schema.json](https://schemas.stacspec.org/v1.0.0-beta.3/extensions/ml-model/json-schema/schema.json) +- **Title:** Machine Learning Model Extension +- **Identifier:** 
[https://schemas.stacspec.org/2.0.0.alpha.0/extensions/ml-model/json-schema/schema.json](https://schemas.stacspec.org/2.0.0.alpha.0/extensions/ml-model/json-schema/schema.json) - **Field Name Prefix:** mlm - **Scope:** Item, Collection - **Extension Maturity Classification:** Proposal - **Owner:** - - [@sfoucher](https://github.com/sfoucher) - [@fmigneault](https://github.com/fmigneault) - - [@ymoisan](https://github.com/ymoisan) - [@rbavery](https://github.com/rbavery) + - [@ymoisan](https://github.com/ymoisan) + - [@sfoucher](https://github.com/sfoucher) + +The STAC Machine Learning Model (MLM) Extension provides a standard set of fields to describe machine learning models trained on overhead imagery and enable running model inference. -This document explains the Template Extension to the [SpatioTemporal Asset Catalog (STAC)](https://github.com/radiantearth/stac-spec) specification. This document explains the fields of the STAC Model Extension to a STAC Item. The main objective of the extension is two-fold: 1) to enable building model collections that can be searched alongside associated STAC datasets and 2) to record all necessary parameters, artifact locations, and high level processing steps to deploy an inference service. Specifically, this extension records the following info to make ML models searchable and reusable: +The main objective of the extension is two-fold: 1) to enable building model collections that can be searched alongside associated STAC datasets and 2) to record all necessary bands, parameters, modeling artifact locations, and high-level processing steps to deploy an inference service. Specifically, this extension records the following information to make ML models searchable and reusable: 1. Sensor band specifications 2. The two fundamental transforms on model inputs: rescale and normalization 3. Model output shape, data type, and its semantic interpretation 4. 
An optional, flexible description of the runtime environment to be able to run the model 5. Scientific references -Note: The spec is biased towards supervised ML models the produce classifications. However, fields that relate to supervised ML are optional and users can use the fields they need for different tasks. +Note: The MLM specification is biased towards supervised ML models the produce classifications. However, fields that relate to supervised ML are optional and users can use the fields they need for different tasks. Check the original technical report for an earlier version of the Model Extension [here](https://github.com/crim-ca/CCCOT03/raw/main/CCCOT03_Rapport%20Final_FINAL_EN.pdf) for more details. @@ -34,12 +36,13 @@ Check the original technical report for an earlier version of the Model Extensio ## Item Properties and Collection Fields -| Field Name | Type | Description | -|------------------|---------------------------------------------|------------------------------------------------------------------------| -| dlm:input | [ModelInput](#model-input) | Describes the transformation between the EO data and the model input. | -| dlm:architecture | [Architecture](#architecture) | Describes the model architecture. | -| dlm:runtime | [Runtime](#runtime) | Describes the runtime environments to run the model (inference). | -| dlm:output | [ModelOutput](#model-output) | Describes each model output and how to interpret it. | +| Field Name | Type | Description | +|------------------|---------------------------------------------|-----------------------------------------------------------------------| +| mlm:input | [[Model Input Object](#model-input-object)] | Describes the transformation between the EO data and the model input. | +| mlm:architecture | [Architecture](#architecture) | Describes the model architecture. | +| mlm:runtime | [Runtime](#runtime) | Describes the runtime environments to run the model (inference). 
| +| mlm:output | [ModelOutput](#model-output) | Describes each model output and how to interpret it. | + In addition, fields from the following extensions must be imported in the item: - [Scientific Extension Specification][stac-ext-sci] to describe relevant publications. @@ -48,82 +51,84 @@ In addition, fields from the following extensions must be imported in the item: [stac-ext-sci]: https://github.com/radiantearth/stac-spec/tree/v1.0.0-beta.2/extensions/scientific/README.md [stac-ext-ver]: https://github.com/radiantearth/stac-spec/tree/v1.0.0-beta.2/extensions/version/README.md - ### Model Input -| Field Name | Type | Description | -|-------------------------|---------------------------------|---------------------------------------------------------------------------------------------------------------------------------| -| name | string | Informative name of the input variable. Example "RGB Time Series" | -| bands | [Band](#bands) | Describes the EO data used to train or fine-tune the model. | -| input_array | [Array](#array) | Shape of the input array/tensor ($N \times C \times H \times W$). | -| params | dict | dictionary with names for the parameters and their values. some models may take multiple input arrays, scalars, other non-tensor inputs. | -| scaling_factor | number | Scaling factor to apply to get data within a `[0,1]` range. For instance `scaling_factor=0.004` for 8-bit data. | -| norm_by_channel | string | Whether to normalize each channel by channel-wise statistics or to normalize by dataset statistics. "True" or "False" | -| norm_type | string | Normalization method. Select one option from "min_max", "z_score", "max_norm", "mean_norm", "unit_variance", "none" | -| rescale_type | string | High level descriptor of the rescaling method to change image shape. Select one option from "crop", "pad", "interpolation", "none". 
If your rescaling method combines more than one of these operations, provide the name of the operation instead| -| statistics | [Statistics](https://github.com/radiantearth/stac-spec/pull/1254/files#diff-2477b726f8c5d5d1c8b391be056db325e6918e78a24b414ccd757c7fbd574079R294) | Dataset statistics for the training dataset used to normalize the inputs. | -| pre_processing_function | string | A url to the preprocessing function where normalization and rescaling takes place, and any other significant operations. Or, instead, the function code path, for example: my_python_module_name:my_processing_function| +| Field Name | Type | Description | +|-------------------------|-------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| name | string | Informative name of the input variable. Example "RGB Time Series" | +| bands | [Band](#bands-and-statistics) | Describes the EO data used to train or fine-tune the model. | +| input_array | [Array](#array) | Shape of the input array/tensor ($N \times C \times H \times W$). | +| params | dict | Dictionary with names for the parameters and their values. Some models may take multiple input arrays, scalars, other non-tensor inputs. | +| scaling_factor | number | Scaling factor to apply to get data within a `[0,1]` range. For instance `scaling_factor=0.004` for 8-bit data. | +| norm_by_channel | string | Whether to normalize each channel by channel-wise statistics or to normalize by dataset statistics. "True" or "False" | +| norm_type | string | Normalization method. Select one option from "min_max", "z_score", "max_norm", "mean_norm", "unit_variance", "none" | +| rescale_type | string | High-level descriptor of the rescaling method to change image shape. Select one option from "crop", "pad", "interpolation", "none". 
If your rescaling method combines more than one of these operations, provide the name of the operation instead | +| statistics | [Statistics](stac-statistics) | Dataset statistics for the training dataset used to normalize the inputs. | +| pre_processing_function | string | A url to the preprocessing function where normalization and rescaling takes place, and any other significant operations. Or, instead, the function code path, for example: my_python_module_name:my_processing_function | #### Bands and Statistics We use the [STAC 1.1 Bands Object](https://github.com/radiantearth/stac-spec/pull/1254) for representing bands information, including nodata value, data type, and common band names. Only bands used to train or fine tune the model should be included in this list. -A deviation is that we do not include the [Statistics](https://github.com/radiantearth/stac-spec/pull/1254/files#diff-2477b726f8c5d5d1c8b391be056db325e6918e78a24b414ccd757c7fbd574079R294) object at the band object level, but at the Model Input level. This is because in machine learning, we typically only need statistics for the dataset used to train the model in order to normalize any given bands input. +A deviation is that we do not include the [Statistics](stac-statistics) object at the band object level, but at the Model Input level. This is because in machine learning, we typically only need statistics for the dataset used to train the model in order to normalize any given bands input. + +[stac-statistics]: https://github.com/radiantearth/stac-spec/pull/1254/files#diff-2477b726f8c5d5d1c8b391be056db325e6918e78a24b414ccd757c7fbd574079R294 #### Array -| Field Name | Type | Description | -|------------|--------|-------------------------------------| -| shape | [integer] | Shape of the input array, including the batch size dimension. The batch size dimension must either be greater than 0 or -1 to indicate an unspecified batch dimension size. 
| -|dim_order | string | How the above dimensions are ordered with the tensor. "bhw", "bchw", "bthw", "btchw" are valid orderings where b=batch, c=channel, t=time, h=height, w=width| -|dtype | string | The data type of values in the array. Suggested to use [Numpy numerical types](https://numpy.org/devdocs/user/basics.types.html), omitting the numpy module, e.g. "float32" | +| Field Name | Type | Description | +|------------|-----------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| shape | [integer] | Shape of the input array, including the batch size dimension. The batch size dimension must either be greater than 0 or -1 to indicate an unspecified batch dimension size. | +| dim_order | string | How the above dimensions are ordered with the tensor. "bhw", "bchw", "bthw", "btchw" are valid orderings where b=batch, c=channel, t=time, h=height, w=width | +| dtype | string | The data type of values in the array. Suggested to use [Numpy numerical types](https://numpy.org/devdocs/user/basics.types.html), omitting the numpy module, e.g. "float32" | ### Architecture -| Field Name | Type | Description | -|-------------------------|---------|-------------------------------------------------------------| -| name | string | The name of the model architecture. For example, "ResNet-18" or "Random Forest" | -| summary | string | Summary of the layers, can be the output of `print(model)`. | -| pretrained | string | Indicates the source of the pretraining (ex: ImageNet). | -| total_parameters | integer | Total number of parameters. | -| on_disk_size_mb | number | The memory size on disk of the model artifact (MB). 
| -| ram_size_mb | number | number | The memory size in accelerator memory during inference (MB).| +| Field Name | Type | Description | +|------------------|---------|---------------------------------------------------------------------------------| +| name | string | The name of the model architecture. For example, "ResNet-18" or "Random Forest" | +| summary | string | Summary of the layers, can be the output of `print(model)`. | +| pretrained | string | Indicates the source of the pretraining (ex: ImageNet). | +| total_parameters | integer | Total number of parameters. | +| on_disk_size_mb | number | The memory size on disk of the model artifact (MB). | +| ram_size_mb | number | The memory size in accelerator memory during inference (MB). | ### Runtime -| Field Name | Type | Description | -|-----------------------|------------------------------------|------------------------------------------------------------------------------------------| -| framework | string | Used framework (ex: PyTorch, TensorFlow). | -| version | string | Framework version (some models require a specific version of the framework). | -| model_asset | [Asset Object](https://github.com/radiantearth/stac-spec/blob/master/collection-spec/collection-spec.md#asset-object) | Common Metadata Collection level asset object containing URI to the model file. | -| model_handler | string | Inference execution function. | -| source_code_url | string | Url of the source code (ex: GitHub repo). | -| model_commit_hash | string | Hash value pointing to a specific version of the code. | -| docker | [Container](#container) | Information for the deployment of the model in a docker instance. 
| -| batch_size_suggestion | number | A suggested batch size for a given compute instance type | -| hardware_suggestion | str | A suggested cloud instance type or accelerator model | +| Field Name | Type | Description | +|-----------------------|----------------------------|---------------------------------------------------------------------------------| +| framework | string | Used framework (ex: PyTorch, TensorFlow). | +| version | string | Framework version (some models require a specific version of the framework). | +| model_asset | [Asset Object](stac-asset) | Common Metadata Collection level asset object containing URI to the model file. | +| model_handler | string | Inference execution function. | +| source_code_url | string | Url of the source code (ex: GitHub repo). | +| model_commit_hash | string | Hash value pointing to a specific version of the code. | +| docker | [Container](#container) | Information for the deployment of the model in a docker instance. | +| batch_size_suggestion | number | A suggested batch size for a given compute instance type | +| hardware_suggestion | string | A suggested cloud instance type or accelerator model | + +[stac-asset]: https://github.com/radiantearth/stac-spec/blob/master/collection-spec/collection-spec.md#asset-object #### Container -| Field Name | Type | Description | -|-------------|---------|-------------------------------------------------------| -| docker_file | string | Url of the Dockerfile. | -| image_name | string | Name of the docker image. | -| tag | string | Tag of the image. | -| working_dir | string | Working directory in the instance that can be mapped. | -| run | string | Running command. | -| accelerator | boolean | True if the docker image requires a custom accelerator (CPU,TPU,MPS). | +| Field Name | Type | Description | +|----------------|---------|--------------------------------------------------------------------------| +| container_file | string | Url of the container file (Dockerfile). 
| +| image_name | string | Name of the container image. | +| tag | string | Tag of the image. | +| working_dir | string | Working directory in the instance that can be mapped. | +| run | string | Running command. | +| accelerator | boolean | True if the container image requires a custom accelerator (CPU,TPU,MPS). | ### Output -| Field Name | Type | Description | -|--------------------------|-------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| task | [Task Enum](#task-enum) | Specifies the Machine Learning task for which the output can be used for. | -| number_of_classes | integer | Number of classes. | -| output_shape | \[integer] | Shape of the output array/tensor from the model For example ($N \times H \times W$). Use -1 to indicate variable dimensions, like the batch dimension. | -| class_name_mapping | dict | Mapping of the output index to a short class name, for each record we specify the index and the class name. | -| post_processing_function | string | A url to the postprocessing function where normalization and rescaling takes place, and any other significant operations. Or, instead, the function code path, for example: my_python_module_name:my_processing_function| - +| Field Name | Type | Description | +|--------------------------|-------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| task | [Task Enum](#task-enum) | Specifies the Machine Learning task for which the output can be used for. | +| class_count | integer | Number of classes. | +| output_shape | \[integer] | Shape of the output array/tensor from the model For example ($N \times H \times W$). 
Use -1 to indicate variable dimensions, like the batch dimension. | +| class_name_mapping | dict | Mapping of the output index to a short class name, for each record we specify the index and the class name. | +| post_processing_function | string | A url to the postprocessing function where normalization and rescaling takes place, and any other significant operations. Or, instead, the function code path, for example: my_python_module_name:my_processing_function | #### Task Enum diff --git a/stac_model/README.md b/stac_model/README.md index 91439b9..c746941 100644 --- a/stac_model/README.md +++ b/stac_model/README.md @@ -54,38 +54,79 @@ Currently this looks like ``` { - "signatures": { - "inputs": [ - { - "name": "input_tensor", - "dtype": "float32", - "shape": [ - -1, - 13, - 64, - 64 - ] - } - ], - "outputs": [ - { - "name": "output_tensor", - "dtype": "float32", - "shape": [ - -1, - 10 - ] - } - ], - "params": null + "mlm_input": { + "name": "13 Band Sentinel-2 Batch", + "bands": [], + "input_array": { + "shape": [ + -1, + 13, + 64, + 64 + ], + "dim_order": "bchw", + "dtype": "float32" + }, + "norm_type": "z_score", + "rescale_type": "none", + "norm_by_channel": true, + "statistics": { + "mean": [ + 1354.40546513, + 1118.24399958, + 1042.92983953, + 947.62620298, + 1199.47283961, + 1999.79090914, + 2369.22292565, + 2296.82608323, + 732.08340178, + 12.11327804, + 1819.01027855, + 1118.92391149, + 2594.14080798 + ], + "stddev": [ + 245.71762908, + 333.00778264, + 395.09249139, + 593.75055589, + 566.4170017, + 861.18399006, + 1086.63139075, + 1117.98170791, + 404.91978886, + 4.77584468, + 1002.58768311, + 761.30323499, + 1231.58581042 + ] + }, + "pre_processing_function": "https://github.com/microsoft/torchgeo/blob/545abe8326efc2848feae69d0212a15faba3eb00/torchgeo/datamodules/eurosat.py" + }, + "mlm_architecture": { + "name": "ResNet-18", + "summary": "Sourced from torchgeo python library, identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO", + "pretrained": true, + 
"total_parameters": 11700000 }, - "artifact": { - "path": "s3://example/s3/uri/model.pt", - "additional_files": null + "mlm_runtime": { + "framework": "torch", + "version": "2.1.2+cu121", + "asset": { + "href": "." + }, + "source_code_url": "https://github.com/huggingface/pytorch-image-models/blob/b5a4fa9c3be6ac732807db7e87d176f5e4fc06f1/timm/models/resnet.py#L362", + "handler": "torchgeo.models.resnet.ResNet18" }, - "id": "3fa03dceb4004b6e8a9e8591e4b3a99d", - "class_map": { - "class_to_label_id": { + "mlm_output": { + "task": "classification", + "number_of_classes": 10, + "output_shape": [ + -1, + 10 + ], + "class_name_mapping": { "Annual Crop": 0, "Forest": 1, "Herbaceous Vegetation": 2, @@ -97,14 +138,7 @@ Currently this looks like "River": 8, "SeaLake": 9 } - }, - "runtime_config": null, - "name": "eurosat", - "ml_model_type": null, - "ml_model_processor_type": "cpu", - "ml_model_learning_approach": null, - "ml_model_prediction_type": null, - "ml_model_architecture": null + } } ``` diff --git a/stac_model/pyproject.toml b/stac_model/pyproject.toml index 3598cbd..b984f85 100644 --- a/stac_model/pyproject.toml +++ b/stac_model/pyproject.toml @@ -49,6 +49,7 @@ rich = "^13.7.0" pydantic = "^2.5.0" # bug in post 2.3 https://github.com/pydantic/pydantic/issues/7720 pydantic-core = "~2" numpy = "^1.26.2" +fastapi="^0.108.0" [tool.poetry.group.dev.dependencies] From b5a1fc8497db084c10ac5de22a61558e6eb9ab2d Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Tue, 9 Jan 2024 12:44:56 -0800 Subject: [PATCH 012/112] address more comments --- README.md | 48 ++++++++++++++++++++++++++---------------------- 1 file changed, 26 insertions(+), 22 deletions(-) diff --git a/README.md b/README.md index 19832b9..5f81831 100644 --- a/README.md +++ b/README.md @@ -17,7 +17,7 @@ The STAC Machine Learning Model (MLM) Extension provides a standard set of field The main objective of the extension is two-fold: 1) to enable building model collections that can be searched alongside associated 
STAC datasets and 2) to record all necessary bands, parameters, modeling artifact locations, and high-level processing steps to deploy an inference service. Specifically, this extension records the following information to make ML models searchable and reusable: 1. Sensor band specifications -2. The two fundamental transforms on model inputs: rescale and normalization +2. Model input transforms including rescale and normalization 3. Model output shape, data type, and its semantic interpretation 4. An optional, flexible description of the runtime environment to be able to run the model 5. Scientific references @@ -56,11 +56,10 @@ In addition, fields from the following extensions must be imported in the item: | Field Name | Type | Description | |-------------------------|-------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | name | string | Informative name of the input variable. Example "RGB Time Series" | -| bands | [Band](#bands-and-statistics) | Describes the EO data used to train or fine-tune the model. | -| input_array | [Array](#array) | Shape of the input array/tensor ($N \times C \times H \times W$). | +| bands | [string] | Describes the EO bands used to train or fine-tune the model, which may be all or a subset of bands available in a STAc Item's [Band Object](#bands-and-statistics). | +| input_array | [NDArray](#ndarray) | The N-dimensional array object that describes the shape, dimension ordering, and data type. | | params | dict | Dictionary with names for the parameters and their values. Some models may take multiple input arrays, scalars, other non-tensor inputs. | -| scaling_factor | number | Scaling factor to apply to get data within a `[0,1]` range. For instance `scaling_factor=0.004` for 8-bit data. 
| -| norm_by_channel | string | Whether to normalize each channel by channel-wise statistics or to normalize by dataset statistics. "True" or "False" | +| norm_by_channel | boolean | Whether to normalize each channel by channel-wise statistics or to normalize by dataset statistics. | | norm_type | string | Normalization method. Select one option from "min_max", "z_score", "max_norm", "mean_norm", "unit_variance", "none" | | rescale_type | string | High-level descriptor of the rescaling method to change image shape. Select one option from "crop", "pad", "interpolation", "none". If your rescaling method combines more than one of these operations, provide the name of the operation instead | | statistics | [Statistics](stac-statistics) | Dataset statistics for the training dataset used to normalize the inputs. | @@ -68,30 +67,36 @@ In addition, fields from the following extensions must be imported in the item: #### Bands and Statistics -We use the [STAC 1.1 Bands Object](https://github.com/radiantearth/stac-spec/pull/1254) for representing bands information, including nodata value, data type, and common band names. Only bands used to train or fine tune the model should be included in this list. +We use the [STAC 1.1 Bands Object](https://github.com/radiantearth/stac-spec/pull/1254) for representing bands information, including nodata value, data type, and common band names. Only bands used to train or fine tune the model should be included in this `bands` field. -A deviation is that we do not include the [Statistics](stac-statistics) object at the band object level, but at the Model Input level. This is because in machine learning, we typically only need statistics for the dataset used to train the model in order to normalize any given bands input. +A deviation from the [STAC 1.1 Bands Object](https://github.com/radiantearth/stac-spec/pull/1254) is that we do not include the [Statistics](stac-statistics) object at the band object level, but at the Model Input level. 
This is because in machine learning, we typically only need statistics for the dataset used to train the model in order to normalize any given bands input. [stac-statistics]: https://github.com/radiantearth/stac-spec/pull/1254/files#diff-2477b726f8c5d5d1c8b391be056db325e6918e78a24b414ccd757c7fbd574079R294 -#### Array +#### NDArray -| Field Name | Type | Description | -|------------|-----------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| shape | [integer] | Shape of the input array, including the batch size dimension. The batch size dimension must either be greater than 0 or -1 to indicate an unspecified batch dimension size. | -| dim_order | string | How the above dimensions are ordered with the tensor. "bhw", "bchw", "bthw", "btchw" are valid orderings where b=batch, c=channel, t=time, h=height, w=width | -| dtype | string | The data type of values in the array. Suggested to use [Numpy numerical types](https://numpy.org/devdocs/user/basics.types.html), omitting the numpy module, e.g. "float32" | +| Field Name | Type | Description | +|------------|-----------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| shape | [integer] | Shape of the input n-dimensional array ($N \times C \times H \times W$), including the batch size dimension. The batch size dimension must either be greater than 0 or -1 to indicate an unspecified batch dimension size. | +| dim_order | string | How the above dimensions are ordered with the tensor. "bhw", "bchw", "bthw", "btchw" are valid orderings where b=batch, c=channel, t=time, h=height, w=width | +| dtype | string | The data type of values in the array. 
Suggested to use [Numpy numerical types](https://numpy.org/devdocs/user/basics.types.html), omitting the numpy module, e.g. "float32" | ### Architecture -| Field Name | Type | Description | -|------------------|---------|---------------------------------------------------------------------------------| -| name | string | The name of the model architecture. For example, "ResNet-18" or "Random Forest" | -| summary | string | Summary of the layers, can be the output of `print(model)`. | -| pretrained | string | Indicates the source of the pretraining (ex: ImageNet). | -| total_parameters | integer | Total number of parameters. | -| on_disk_size_mb | number | The memory size on disk of the model artifact (MB). | -| ram_size_mb | number | The memory size in accelerator memory during inference (MB). | +| Field Name | Type | Description | +|------------------|---------------------------------------|---------------------------------------------------------------------------------| +| name | string | The name of the model architecture. For example, "ResNet-18" or "Random Forest" | +| summary | string | Summary of the layers, can be the output of `print(model)`. | +| pretrained | string | Indicates the source of the pretraining (ex: ImageNet). | +| total_parameters | integer | Total number of parameters. | +| file_size | number | The size on disk of the model artifact (MB). | +| memory_size | number | The in-memory size on the accelerator during inference (MB). | +| accelerator | [Accelerator Enum](#accelerator-enum) | The intended accelerator that runs inference. | + +#### Accelerator Enum + +It is recommended to define `accelerator` with one of the following values: + ### Runtime @@ -118,7 +123,6 @@ A deviation is that we do not include the [Statistics](stac-statistics) object a | tag | string | Tag of the image. | | working_dir | string | Working directory in the instance that can be mapped. | | run | string | Running command. 
| -| accelerator | boolean | True if the container image requires a custom accelerator (CPU,TPU,MPS). | ### Output From ffa398e766bb3bc13c9208883bc9870625182c18 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Tue, 9 Jan 2024 22:22:07 -0800 Subject: [PATCH 013/112] address most first draft comments --- CHANGELOG.md | 4 +- README.md | 154 ++++++++++++++++++++++++++++++--------------------- 2 files changed, 92 insertions(+), 66 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6a5eddd..bb79cf4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,8 +7,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] ### Added -- more Task Enum tasks -- accelerator options +- more [Task Enum](./README.md#task-enum) tasks +- [accelerator](./README#accelerators) options in [Runtime Object](./README#runtime-object) - batch_size and hardware suggestion - ram_size_mb to specify model ram requirements during inference - added time to the Tensor object as an optional dim diff --git a/README.md b/README.md index 5f81831..bcc643c 100644 --- a/README.md +++ b/README.md @@ -36,12 +36,12 @@ Check the original technical report for an earlier version of the Model Extensio ## Item Properties and Collection Fields -| Field Name | Type | Description | -|------------------|---------------------------------------------|-----------------------------------------------------------------------| -| mlm:input | [[Model Input Object](#model-input-object)] | Describes the transformation between the EO data and the model input. | -| mlm:architecture | [Architecture](#architecture) | Describes the model architecture. | -| mlm:runtime | [Runtime](#runtime) | Describes the runtime environments to run the model (inference). | -| mlm:output | [ModelOutput](#model-output) | Describes each model output and how to interpret it. 
| +| Field Name | Type | Description | +|------------------|---------------------------------------------|-------------------------------------------------------------------------------------| +| mlm:input | [[Model Input Object](#model-input-object)] | **REQUIRED.** Describes the transformation between the EO data and the model input. | +| mlm:architecture | [Architecture](#architecture) | **REQUIRED.** Describes the model architecture. | +| mlm:runtime | [Runtime](#runtime) | **REQUIRED.** Describes the runtime environments to run the model (inference). | +| mlm:output | [ModelOutput](#model-output) | **REQUIRED.** Describes each model output and how to interpret it. | In addition, fields from the following extensions must be imported in the item: @@ -51,19 +51,19 @@ In addition, fields from the following extensions must be imported in the item: [stac-ext-sci]: https://github.com/radiantearth/stac-spec/tree/v1.0.0-beta.2/extensions/scientific/README.md [stac-ext-ver]: https://github.com/radiantearth/stac-spec/tree/v1.0.0-beta.2/extensions/version/README.md -### Model Input +### Model Input Object -| Field Name | Type | Description | -|-------------------------|-------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| name | string | Informative name of the input variable. Example "RGB Time Series" | -| bands | [string] | Describes the EO bands used to train or fine-tune the model, which may be all or a subset of bands available in a STAc Item's [Band Object](#bands-and-statistics). | -| input_array | [NDArray](#ndarray) | The N-dimensional array object that describes the shape, dimension ordering, and data type. | -| params | dict | Dictionary with names for the parameters and their values. 
| Some models may take multiple input arrays, scalars, other non-tensor inputs. |
-| norm_by_channel         | boolean                       | Whether to normalize each channel by channel-wise statistics or to normalize by dataset statistics.                                                                                                                                                  |
-| norm_type               | string                        | Normalization method. Select one option from "min_max", "z_score", "max_norm", "mean_norm", "unit_variance", "none"                                                                                                                                  |
-| rescale_type            | string                        | High-level descriptor of the rescaling method to change image shape. Select one option from "crop", "pad", "interpolation", "none". If your rescaling method combines more than one of these operations, provide the name of the operation instead    |
-| statistics              | [Statistics](stac-statistics) | Dataset statistics for the training dataset used to normalize the inputs.                                                                                                                                                                            |
-| pre_processing_function | string                        | A url to the preprocessing function where normalization and rescaling takes place, and any other significant operations. Or, instead, the function code path, for example: my_python_module_name:my_processing_function                               |
+| Field Name              | Type                                          | Description                                                                                                                                                                                                                                          |
+|-------------------------|-----------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| name                    | string                                        | **REQUIRED.** Informative name of the input variable. Example "RGB Time Series"                                                                                                                                                                      |
+| bands                   | [string]                                      | **REQUIRED.** Describes the EO bands used to train or fine-tune the model, which may be all or a subset of bands available in a STAC Item's [Band Object](#bands-and-statistics).                                                                     |
+| input_feature           | [Feature Array Object](#feature-array-object) | **REQUIRED.** The N-dimensional feature array object that describes the shape, dimension ordering, and data type.                                                                                                                                    |
+| params                  | dict                                          | Dictionary with names for the parameters and their values. 
Some models may take multiple input arrays, scalars, other non-tensor inputs. | +| norm_by_channel | boolean | Whether to normalize each channel by channel-wise statistics or to normalize by dataset statistics. | +| norm_type | string | Normalization method. Select one option from "min_max", "z_score", "max_norm", "mean_norm", "unit_variance", "none" | +| rescale_type | string | High-level descriptor of the rescaling method to change image shape. Select one option from "crop", "pad", "interpolation", "none". If your rescaling method combines more than one of these operations, provide the name of the operation instead | +| statistics | [Statistics Object](stac-statistics) | Dataset statistics for the training dataset used to normalize the inputs. | +| pre_processing_function | string | A url to the preprocessing function where normalization and rescaling takes place, and any other significant operations. Or, instead, the function code path, for example: my_python_module_name:my_processing_function | #### Bands and Statistics @@ -73,66 +73,75 @@ A deviation from the [STAC 1.1 Bands Object](https://github.com/radiantearth/sta [stac-statistics]: https://github.com/radiantearth/stac-spec/pull/1254/files#diff-2477b726f8c5d5d1c8b391be056db325e6918e78a24b414ccd757c7fbd574079R294 -#### NDArray +#### Feature Array Object | Field Name | Type | Description | |------------|-----------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| shape | [integer] | Shape of the input n-dimensional array ($N \times C \times H \times W$), including the batch size dimension. The batch size dimension must either be greater than 0 or -1 to indicate an unspecified batch dimension size. | -| dim_order | string | How the above dimensions are ordered with the tensor. 
"bhw", "bchw", "bthw", "btchw" are valid orderings where b=batch, c=channel, t=time, h=height, w=width | -| dtype | string | The data type of values in the array. Suggested to use [Numpy numerical types](https://numpy.org/devdocs/user/basics.types.html), omitting the numpy module, e.g. "float32" | - -### Architecture - -| Field Name | Type | Description | -|------------------|---------------------------------------|---------------------------------------------------------------------------------| -| name | string | The name of the model architecture. For example, "ResNet-18" or "Random Forest" | -| summary | string | Summary of the layers, can be the output of `print(model)`. | -| pretrained | string | Indicates the source of the pretraining (ex: ImageNet). | -| total_parameters | integer | Total number of parameters. | -| file_size | number | The size on disk of the model artifact (MB). | -| memory_size | number | The in-memory size on the accelerator during inference (MB). | -| accelerator | [Accelerator Enum](#accelerator-enum) | The intended accelerator that runs inference. | +| shape | [integer] | **REQUIRED.** Shape of the input n-dimensional feature array ($N \times C \times H \times W$), including the batch size dimension. The batch size dimension must either be greater than 0 or -1 to indicate an unspecified batch dimension size. | +| dim_order | string | **REQUIRED.** How the above dimensions are ordered with the tensor. "bhw", "bchw", "bthw", "btchw" are valid orderings where b=batch, c=channel, t=time, h=height, w=width | +| dtype | string | **REQUIRED.** The data type of values in the feature array. Suggested to use [Numpy numerical types](https://numpy.org/devdocs/user/basics.types.html), omitting the numpy module, e.g. 
"float32" | + +### Architecture Object + +| Field Name | Type | Description | +|-------------------|---------|-----------------------------------------------------------------------------------------------| +| name | string | **REQUIRED.** The name of the model architecture. For example, "ResNet-18" or "Random Forest" | +| file_size | integer | **REQUIRED.** The size on disk of the model artifact (bytes). | +| memory_size | integer | **REQUIRED.** The in-memory size of the model on the accelerator during inference (bytes). | +| summary | string | Summary of the layers, can be the output of `print(model)`. | +| pretrained_source | string | Indicates the source of the pretraining (ex: ImageNet). | +| total_parameters | integer | Total number of parameters. | + +### Runtime Object + +| Field Name | Type | Description | +|-----------------------|---------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| framework | string | **REQUIRED.** Used framework (ex: PyTorch, TensorFlow). | +| version | string | **REQUIRED.** Framework version (some models require a specific version of the framework). | +| model_asset | [Asset Object](stac-asset) | **REQUIRED.** Common Metadata Collection level asset object containing URI to the model file. | +| accelerator | [Accelerator Enum](#accelerator-enum) | **REQUIRED.** The intended accelerator that runs inference. | +| hardware_summary | string | **REQUIRED.** A high level description of the number of accelerators, specific generation of accelerator, or other relevant inference details. | +| source_code | [Asset Object](stac-asset) | **REQUIRED.** Source code description. Can describe a github repo, zip archive, etc. 
This description should reference the inference function, for example my_package.my_module.predict | +| docker | [Container](#container) | **RECOMMENDED.** Information for the deployment of the model in a docker instance. | +| model_commit_hash | string | Hash value pointing to a specific version of the code. | +| batch_size_suggestion | number | A suggested batch size for the accelerator and summarized hardware. | #### Accelerator Enum It is recommended to define `accelerator` with one of the following values: - -### Runtime - -| Field Name | Type | Description | -|-----------------------|----------------------------|---------------------------------------------------------------------------------| -| framework | string | Used framework (ex: PyTorch, TensorFlow). | -| version | string | Framework version (some models require a specific version of the framework). | -| model_asset | [Asset Object](stac-asset) | Common Metadata Collection level asset object containing URI to the model file. | -| model_handler | string | Inference execution function. | -| source_code_url | string | Url of the source code (ex: GitHub repo). | -| model_commit_hash | string | Hash value pointing to a specific version of the code. | -| docker | [Container](#container) | Information for the deployment of the model in a docker instance. | -| batch_size_suggestion | number | A suggested batch size for a given compute instance type | -| hardware_suggestion | string | A suggested cloud instance type or accelerator model | +- `amd64` models compatible with AMD or Intel CPUs (no hardware specific optimizations) +- `cuda` models compatible with NVIDIA GPUs +- `xla` models compiled with XLA. models trained on TPUs are typically compiled with XLA. 
+- `amd-rocm` models trained on AMD GPUs +- `intel-ipex-cpu` for models optimized with IPEX for Intel CPUs +- `intel-ipex-gpu` for models optimized with IPEX for Intel GPUs +- `macos-arm` for models trained on Apple Silicon [stac-asset]: https://github.com/radiantearth/stac-spec/blob/master/collection-spec/collection-spec.md#asset-object -#### Container +#### Container Object + +| Field Name | Type | Description | +|----------------|--------|-------------------------------------------------------| +| container_file | string | Url of the container file (Dockerfile). | +| image_name | string | Name of the container image. | +| tag | string | Tag of the image. | +| working_dir | string | Working directory in the instance that can be mapped. | +| run | string | Running command. | -| Field Name | Type | Description | -|----------------|---------|--------------------------------------------------------------------------| -| container_file | string | Url of the container file (Dockerfile). | -| image_name | string | Name of the container image. | -| tag | string | Tag of the image. | -| working_dir | string | Working directory in the instance that can be mapped. | -| run | string | Running command. | +### Output Object -### Output +| Field Name | Type | Description | +|--------------------------|---------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| task | [Task Enum](#task-enum) | **REQUIRED.** Specifies the Machine Learning task for which the output can be used for. | +| number_of_classes | integer | Number of classes. | +| result | [[Result Object](#result-object)] | The list of output array/tensor from the model. For example ($N \times H \times W$). Use -1 to indicate variable dimensions, like the batch dimension. 
| +| class_name_mapping | [Class Map Object](#class-map-object) | Mapping of the class name to an index representing the label in the model output. | +| post_processing_function | string | A url to the postprocessing function where normalization and rescaling takes place, and any other significant operations. Or, instead, the function code path, for example: my_package.my_module.my_processing_function | -| Field Name | Type | Description | -|--------------------------|-------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| task | [Task Enum](#task-enum) | Specifies the Machine Learning task for which the output can be used for. | -| class_count | integer | Number of classes. | -| output_shape | \[integer] | Shape of the output array/tensor from the model For example ($N \times H \times W$). Use -1 to indicate variable dimensions, like the batch dimension. | -| class_name_mapping | dict | Mapping of the output index to a short class name, for each record we specify the index and the class name. | -| post_processing_function | string | A url to the postprocessing function where normalization and rescaling takes place, and any other significant operations. Or, instead, the function code path, for example: my_python_module_name:my_processing_function | +While only `task` is a required field, all fields are recommended for supervised tasks that produce a fixed shape tensor and have output classes. +`image-captioning`, `multi-modal`, and `generative` tasks may not return fixed shape tensors or classes. #### Task Enum @@ -153,6 +162,23 @@ STAC Collections and Items employed with the model described by this extension. 
[stac-ext-label-props]: https://github.com/stac-extensions/label#item-properties
+
+#### Result Object
+
+| Field Name | Type      | Description                                                                                                                                                                                                                               |
+|------------|-----------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| shape      | [integer] | **REQUIRED.** Shape of the n-dimensional result array ($N \times H \times W$), possibly including a batch size dimension. The batch size dimension must either be greater than 0 or -1 to indicate an unspecified batch dimension size.   |
+| dim_names  | [string]  | **REQUIRED.** The names of the above dimensions of the result array.                                                                                                                                                                      |
+| dtype      | string    | **REQUIRED.** The data type of values in the array. Suggested to use [Numpy numerical types](https://numpy.org/devdocs/user/basics.types.html), omitting the numpy module, e.g. "float32"                                                 |
+
+
+#### Class Map Object
+
+| Field Name                        | Type    | Description                                                               |
+|-----------------------------------|---------|---------------------------------------------------------------------------|
+| *class names depend on the model* | integer | There are N integer values corresponding to N class fields.               |
+
+The user can supply any number of fields for the classes of their model if the model produces a supervised classification result. 
| + ## Relation types The following types should be used as applicable `rel` types in the From 0716ee34b0c7071c648de0e5b62025a71f08c4ec Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Wed, 10 Jan 2024 10:29:23 -0800 Subject: [PATCH 014/112] add container instructions --- README.md | 28 ++++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index bcc643c..6d2667d 100644 --- a/README.md +++ b/README.md @@ -98,10 +98,10 @@ A deviation from the [STAC 1.1 Bands Object](https://github.com/radiantearth/sta |-----------------------|---------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | framework | string | **REQUIRED.** Used framework (ex: PyTorch, TensorFlow). | | version | string | **REQUIRED.** Framework version (some models require a specific version of the framework). | -| model_asset | [Asset Object](stac-asset) | **REQUIRED.** Common Metadata Collection level asset object containing URI to the model file. | +| model_asset | [Asset Object](stac-asset) | **REQUIRED.** Asset object containing URI to the model file. | +| source_code | [Asset Object](stac-asset) | **REQUIRED.** Source code description. Can describe a github repo, zip archive, etc. This description should reference the inference function, for example my_package.my_module.predict | | accelerator | [Accelerator Enum](#accelerator-enum) | **REQUIRED.** The intended accelerator that runs inference. | | hardware_summary | string | **REQUIRED.** A high level description of the number of accelerators, specific generation of accelerator, or other relevant inference details. | -| source_code | [Asset Object](stac-asset) | **REQUIRED.** Source code description. Can describe a github repo, zip archive, etc. 
This description should reference the inference function, for example my_package.my_module.predict | | docker | [Container](#container) | **RECOMMENDED.** Information for the deployment of the model in a docker instance. | | model_commit_hash | string | Hash value pointing to a specific version of the code. | | batch_size_suggestion | number | A suggested batch size for the accelerator and summarized hardware. | @@ -130,6 +130,30 @@ It is recommended to define `accelerator` with one of the following values: | working_dir | string | Working directory in the instance that can be mapped. | | run | string | Running command. | +If you're unsure how to containerize your model, we suggest starting from the latest official container image for your framework that works with your model and pinning the container version. + +Examples: +[Pytorch Dockerhub](https://hub.docker.com/r/pytorch/pytorch/tags) +[Pytorch Docker Run Example](https://github.com/pytorch/pytorch?tab=readme-ov-file#docker-image) + +[Tensorflow Dockerhub](https://hub.docker.com/r/tensorflow/tensorflow/tags?page=8&ordering=last_updated) +[Tensorflow Docker Run Example](https://www.tensorflow.org/install/docker#gpu_support) + +Using a base image for a framework looks like + + +```dockerfile +# In your Dockerfile, pull the latest base image with all framework dependencies including accelerator drivers +FROM pytorch/pytorch:2.1.2-cuda11.8-cudnn8-runtime + +### Your specific environment setup to run your model +RUN pip install my_package +``` + +You can also use other base images. Pytorch and Tensorflow offer docker images for serving models for inference. 
+- [Torchserve](https://pytorch.org/serve/) +- [TFServing](https://github.com/tensorflow/serving) + ### Output Object | Field Name | Type | Description | From ab7415115066d7204fac390e4ab2d1aa4431e1ac Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Wed, 10 Jan 2024 10:42:25 -0800 Subject: [PATCH 015/112] fix fields missing object, add accelerator constrained field to runtime --- README.md | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index 6d2667d..0d2cc94 100644 --- a/README.md +++ b/README.md @@ -39,9 +39,9 @@ Check the original technical report for an earlier version of the Model Extensio | Field Name | Type | Description | |------------------|---------------------------------------------|-------------------------------------------------------------------------------------| | mlm:input | [[Model Input Object](#model-input-object)] | **REQUIRED.** Describes the transformation between the EO data and the model input. | -| mlm:architecture | [Architecture](#architecture) | **REQUIRED.** Describes the model architecture. | -| mlm:runtime | [Runtime](#runtime) | **REQUIRED.** Describes the runtime environments to run the model (inference). | -| mlm:output | [ModelOutput](#model-output) | **REQUIRED.** Describes each model output and how to interpret it. | +| mlm:architecture | [Architecture Object](#architecture-object) | **REQUIRED.** Describes the model architecture. | +| mlm:runtime | [Runtime Object](#runtime-object) | **REQUIRED.** Describes the runtime environments to run the model (inference). | +| mlm:output | [Model Output Object](#model-output-object) | **REQUIRED.** Describes each model output and how to interpret it. 
| In addition, fields from the following extensions must be imported in the item: @@ -94,17 +94,18 @@ A deviation from the [STAC 1.1 Bands Object](https://github.com/radiantearth/sta ### Runtime Object -| Field Name | Type | Description | -|-----------------------|---------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| framework | string | **REQUIRED.** Used framework (ex: PyTorch, TensorFlow). | -| version | string | **REQUIRED.** Framework version (some models require a specific version of the framework). | -| model_asset | [Asset Object](stac-asset) | **REQUIRED.** Asset object containing URI to the model file. | -| source_code | [Asset Object](stac-asset) | **REQUIRED.** Source code description. Can describe a github repo, zip archive, etc. This description should reference the inference function, for example my_package.my_module.predict | -| accelerator | [Accelerator Enum](#accelerator-enum) | **REQUIRED.** The intended accelerator that runs inference. | -| hardware_summary | string | **REQUIRED.** A high level description of the number of accelerators, specific generation of accelerator, or other relevant inference details. | -| docker | [Container](#container) | **RECOMMENDED.** Information for the deployment of the model in a docker instance. | -| model_commit_hash | string | Hash value pointing to a specific version of the code. | -| batch_size_suggestion | number | A suggested batch size for the accelerator and summarized hardware. 
| +| Field Name | Type | Description | +|-------------------------|---------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| framework | string | **REQUIRED.** Used framework (ex: PyTorch, TensorFlow). | +| version | string | **REQUIRED.** Framework version (some models require a specific version of the framework). | +| model_asset | [Asset Object](stac-asset) | **REQUIRED.** Asset object containing URI to the model file. | +| source_code | [Asset Object](stac-asset) | **REQUIRED.** Source code description. Can describe a github repo, zip archive, etc. This description should reference the inference function, for example my_package.my_module.predict | +| accelerator | [Accelerator Enum](#accelerator-enum) | **REQUIRED.** The intended accelerator that runs inference. | +| accelerator_constrained | boolean | **REQUIRED.** If the intended accelerator is the only accelerator that can run inference. If False, other accelerators, such as the amd64 (CPU), can run inference | +| hardware_summary | string | **REQUIRED.** A high level description of the number of accelerators, specific generation of accelerator, or other relevant inference details. | +| docker | [Container](#container) | **RECOMMENDED.** Information for the deployment of the model in a docker instance. | +| model_commit_hash | string | Hash value pointing to a specific version of the code. | +| batch_size_suggestion | number | A suggested batch size for the accelerator and summarized hardware. | #### Accelerator Enum @@ -154,7 +155,7 @@ You can also use other base images. 
Pytorch and Tensorflow offer docker images f - [Torchserve](https://pytorch.org/serve/) - [TFServing](https://github.com/tensorflow/serving) -### Output Object +### Model Output Object | Field Name | Type | Description | |--------------------------|---------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| From eb8b80a3189c8544357c0c96edfd378432afa69a Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Wed, 10 Jan 2024 10:58:27 -0800 Subject: [PATCH 016/112] account for models that take parameters with one or more of each input or as separate inputs --- README.md | 34 +++++++++++++++++++++------------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index 0d2cc94..e5e9485 100644 --- a/README.md +++ b/README.md @@ -36,13 +36,13 @@ Check the original technical report for an earlier version of the Model Extensio ## Item Properties and Collection Fields -| Field Name | Type | Description | -|------------------|---------------------------------------------|-------------------------------------------------------------------------------------| -| mlm:input | [[Model Input Object](#model-input-object)] | **REQUIRED.** Describes the transformation between the EO data and the model input. | -| mlm:architecture | [Architecture Object](#architecture-object) | **REQUIRED.** Describes the model architecture. | -| mlm:runtime | [Runtime Object](#runtime-object) | **REQUIRED.** Describes the runtime environments to run the model (inference). | -| mlm:output | [Model Output Object](#model-output-object) | **REQUIRED.** Describes each model output and how to interpret it. 
| - +| Field Name | Type | Description | +|------------------|---------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------| +| mlm:input | [[Model Input Object](#model-input-object)] | **REQUIRED.** Describes the transformation between the EO data and the model input. | +| mlm:architecture | [Architecture Object](#architecture-object) | **REQUIRED.** Describes the model architecture. | +| mlm:runtime | [Runtime Object](#runtime-object) | **REQUIRED.** Describes the runtime environments to run the model (inference). | +| mlm:output | [Model Output Object](#model-output-object) | **REQUIRED.** Describes each model output and how to interpret it. | +| parameters | [Parameters Object](#params-object) | Mapping with names for the parameters and their values. Some models may take additional scalars, tuples, and other non-tensor inputs like text. | In addition, fields from the following extensions must be imported in the item: - [Scientific Extension Specification][stac-ext-sci] to describe relevant publications. @@ -56,15 +56,23 @@ In addition, fields from the following extensions must be imported in the item: | Field Name | Type | Description | |-------------------------|-----------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | name | string | **REQUIRED.** Informative name of the input variable. Example "RGB Time Series" | -| bands | [string] | **REQUIRED.** Describes the EO bands used to train or fine-tune the model, which may be all or a subset of bands available in a STAc Item's [Band Object](#bands-and-statistics). 
| +| bands | [string] | **REQUIRED.** Describes the EO bands used to train or fine-tune the model, which may be all or a subset of bands available in a STAC Item's [Band Object](#bands-and-statistics). | | input_feature | [Feature Array Object](#feature-array-object) | **REQUIRED.** The N-dimensional feature array object that describes the shape, dimension ordering, and data type. | -| params | dict | Dictionary with names for the parameters and their values. Some models may take multiple input arrays, scalars, other non-tensor inputs. | +| parameters | [Parameters Object](#params-object) | Mapping with names for the parameters and their values. Some models may take additional scalars, tuples, and other non-tensor inputs like text. | | norm_by_channel | boolean | Whether to normalize each channel by channel-wise statistics or to normalize by dataset statistics. | | norm_type | string | Normalization method. Select one option from "min_max", "z_score", "max_norm", "mean_norm", "unit_variance", "none" | | rescale_type | string | High-level descriptor of the rescaling method to change image shape. Select one option from "crop", "pad", "interpolation", "none". If your rescaling method combines more than one of these operations, provide the name of the operation instead | | statistics | [Statistics Object](stac-statistics) | Dataset statistics for the training dataset used to normalize the inputs. | | pre_processing_function | string | A url to the preprocessing function where normalization and rescaling takes place, and any other significant operations. 
Or, instead, the function code path, for example: my_python_module_name:my_processing_function | +#### Parameters Object + +| Field Name | Type | Description | +|-----------------------------------|---------|--------------------------------------------------------------------------| +| *parameter names depend on the model* | number | string | boolean | array | The field number and names depend on the model as do the values. Values should be not be n-dimensional array inputs. If the model input can be represented as an n-dimensional array, it should instead be supplied as another model input object. | + +The parameters field can either be specified in the model input object if they are associated with a specific input or as an Item or Collection field if the parameters are supplied without relation to a specific model input. + #### Bands and Statistics We use the [STAC 1.1 Bands Object](https://github.com/radiantearth/stac-spec/pull/1254) for representing bands information, including nodata value, data type, and common band names. Only bands used to train or fine tune the model should be included in this `bands` field. 
@@ -75,10 +83,10 @@ A deviation from the [STAC 1.1 Bands Object](https://github.com/radiantearth/sta #### Feature Array Object -| Field Name | Type | Description | -|------------|-----------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Field Name | Type | Description | +|------------|-----------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | shape | [integer] | **REQUIRED.** Shape of the input n-dimensional feature array ($N \times C \times H \times W$), including the batch size dimension. The batch size dimension must either be greater than 0 or -1 to indicate an unspecified batch dimension size. | -| dim_order | string | **REQUIRED.** How the above dimensions are ordered with the tensor. "bhw", "bchw", "bthw", "btchw" are valid orderings where b=batch, c=channel, t=time, h=height, w=width | +| dim_order | string | **REQUIRED.** How the above dimensions are ordered with the tensor. "bhw", "bchw", "bthw", "btchw" are valid orderings where b=batch, c=channel, t=time, h=height, w=width | | dtype | string | **REQUIRED.** The data type of values in the feature array. Suggested to use [Numpy numerical types](https://numpy.org/devdocs/user/basics.types.html), omitting the numpy module, e.g. "float32" | ### Architecture Object @@ -90,7 +98,7 @@ A deviation from the [STAC 1.1 Bands Object](https://github.com/radiantearth/sta | memory_size | integer | **REQUIRED.** The in-memory size of the model on the accelerator during inference (bytes). | | summary | string | Summary of the layers, can be the output of `print(model)`. 
| | pretrained_source | string | Indicates the source of the pretraining (ex: ImageNet). | -| total_parameters | integer | Total number of parameters. | +| total_parameters | integer | Total number of parameters. ### Runtime Object From 5378bae562640086abeda1ebbecf2f734fae2d69 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Wed, 10 Jan 2024 11:14:06 -0800 Subject: [PATCH 017/112] properly escape or operators --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index e5e9485..97a94c8 100644 --- a/README.md +++ b/README.md @@ -69,7 +69,7 @@ In addition, fields from the following extensions must be imported in the item: | Field Name | Type | Description | |-----------------------------------|---------|--------------------------------------------------------------------------| -| *parameter names depend on the model* | number | string | boolean | array | The field number and names depend on the model as do the values. Values should be not be n-dimensional array inputs. If the model input can be represented as an n-dimensional array, it should instead be supplied as another model input object. | +| *parameter names depend on the model* | number `\|` string `\|` boolean `\|` array | The field number and names depend on the model as do the values. Values should be not be n-dimensional array inputs. If the model input can be represented as an n-dimensional array, it should instead be supplied as another model input object. | The parameters field can either be specified in the model input object if they are associated with a specific input or as an Item or Collection field if the parameters are supplied without relation to a specific model input. 
From 0c9f1f2b13f369054250800363f5a8a04ed0550f Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Wed, 10 Jan 2024 11:31:05 -0800 Subject: [PATCH 018/112] language edits for model input, change stats to have type option for array for channelwise norm --- README.md | 41 +++++++++++++++++++++++------------------ 1 file changed, 23 insertions(+), 18 deletions(-) diff --git a/README.md b/README.md index 97a94c8..aff89d8 100644 --- a/README.md +++ b/README.md @@ -15,14 +15,19 @@ The STAC Machine Learning Model (MLM) Extension provides a standard set of fields to describe machine learning models trained on overhead imagery and enable running model inference. -The main objective of the extension is two-fold: 1) to enable building model collections that can be searched alongside associated STAC datasets and 2) to record all necessary bands, parameters, modeling artifact locations, and high-level processing steps to deploy an inference service. Specifically, this extension records the following information to make ML models searchable and reusable: +The main objectives of the extension are: + +1) to enable building model collections that can be searched alongside associated STAC datasets +2) record all necessary bands, parameters, modeling artifact locations, and high-level processing steps to deploy an inference service. + +Specifically, this extension records the following information to make ML models searchable and reusable: 1. Sensor band specifications -2. Model input transforms including rescale and normalization -3. Model output shape, data type, and its semantic interpretation -4. An optional, flexible description of the runtime environment to be able to run the model -5. Scientific references +1. Model input transforms including rescale and normalization +1. Model output shape, data type, and its semantic interpretation +1. An optional, flexible description of the runtime environment to be able to run the model +1. 
Scientific references
-Note: The MLM specification is biased towards supervised ML models the produce classifications. However, fields that relate to supervised ML are optional and users can use the fields they need for different tasks.
+The MLM specification is biased towards supervised ML models that produce classifications. However, fields that relate to supervised ML are optional and users can use the fields they need for different tasks.
 
 Check the original technical report for an earlier version of the Model Extension [here](https://github.com/crim-ca/CCCOT03/raw/main/CCCOT03_Rapport%20Final_FINAL_EN.pdf) for more details.
@@ -31,7 +36,7 @@ Check the original technical report for an earlier version of the Model Extensio
 - Examples:
   - [Example with a ??? trained with torchgeo](examples/item.json) TODO update
   - [Collection example](examples/collection.json): Shows the basic usage of the extension in a STAC Collection
-- [JSON Schema](json-schema/schema.json)
+- [JSON Schema](json-schema/schema.json) TODO update
 - [Changelog](./CHANGELOG.md)
 
 ## Item Properties and Collection Fields
@@ -53,17 +58,17 @@ In addition, fields from the following extensions must be imported in the item:
 ### Model Input Object
 
-| Field Name | Type | Description |
-|-------------------------|-----------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| name | string | **REQUIRED.** Informative name of the input variable. Example "RGB Time Series" |
-| bands | [string] | **REQUIRED.** Describes the EO bands used to train or fine-tune the model, which may be all or a subset of bands available in a STAC Item's [Band Object](#bands-and-statistics). 
| -| input_feature | [Feature Array Object](#feature-array-object) | **REQUIRED.** The N-dimensional feature array object that describes the shape, dimension ordering, and data type. | -| parameters | [Parameters Object](#params-object) | Mapping with names for the parameters and their values. Some models may take additional scalars, tuples, and other non-tensor inputs like text. | -| norm_by_channel | boolean | Whether to normalize each channel by channel-wise statistics or to normalize by dataset statistics. | -| norm_type | string | Normalization method. Select one option from "min_max", "z_score", "max_norm", "mean_norm", "unit_variance", "none" | -| rescale_type | string | High-level descriptor of the rescaling method to change image shape. Select one option from "crop", "pad", "interpolation", "none". If your rescaling method combines more than one of these operations, provide the name of the operation instead | -| statistics | [Statistics Object](stac-statistics) | Dataset statistics for the training dataset used to normalize the inputs. | -| pre_processing_function | string | A url to the preprocessing function where normalization and rescaling takes place, and any other significant operations. Or, instead, the function code path, for example: my_python_module_name:my_processing_function | +| Field Name | Type | Description | | +|-------------------------|-------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------| +| name | string | **REQUIRED.** Informative name of the input variable. 
Example "RGB Time Series" | | +| bands | [string] | **REQUIRED.** The names of the raster bands used to train or fine-tune the model, which may be all or a subset of bands available in a STAC Item's [Band Object](#bands-and-statistics). | | +| input_array | [Array Object](#feature-array-object) | **REQUIRED.** The N-dimensional array object that describes the shape, dimension ordering, and data type. | | +| parameters | [Parameters Object](#params-object) | Mapping with names for the parameters and their values. Some models may take additional scalars, tuples, and other non-tensor inputs like text. | | +| norm_by_channel | boolean | Whether to normalize each channel by channel-wise statistics or to normalize by dataset statistics. If True, use an array of Statistics Objects that is ordered like the bands field. | | +| norm_type | string | Normalization method. Select one option from "min_max", "z_score", "max_norm", "mean_norm", "unit_variance", "none" | | +| rescale_type | string | High-level descriptor of the rescaling method to change image shape. Select one option from "crop", "pad", "interpolation", "none". If your rescaling method combines more than one of these operations, provide the name of the operation instead | | +| statistics | [Statistics Object](stac-statistics) `\ | ` [[Statistics Object](stac-statistics)] | Dataset statistics for the training dataset used to normalize the inputs. | +| pre_processing_function | string | A url to the preprocessing function where normalization and rescaling takes place, and any other significant operations. 
Or, instead, the function code path, for example: my_python_module_name:my_processing_function | | #### Parameters Object From b2cc2f080993a3a7c8fd99d56766cf566f7c88c0 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Wed, 10 Jan 2024 11:54:51 -0800 Subject: [PATCH 019/112] language edits down to Result Array Object, specify derived_from rel type and how to name model json by unique mlm:name --- README.md | 59 +++++++++++++++++++++++++++++-------------------------- 1 file changed, 31 insertions(+), 28 deletions(-) diff --git a/README.md b/README.md index aff89d8..6153ab5 100644 --- a/README.md +++ b/README.md @@ -43,11 +43,12 @@ Check the original technical report for an earlier version of the Model Extensio | Field Name | Type | Description | |------------------|---------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------| +| mlm:name | string | **REQUIRED.** A unique name for the model. Should be distinct from the name of the architecture it is based on, or the name(s) of the input(s). | | mlm:input | [[Model Input Object](#model-input-object)] | **REQUIRED.** Describes the transformation between the EO data and the model input. | | mlm:architecture | [Architecture Object](#architecture-object) | **REQUIRED.** Describes the model architecture. | | mlm:runtime | [Runtime Object](#runtime-object) | **REQUIRED.** Describes the runtime environments to run the model (inference). | | mlm:output | [Model Output Object](#model-output-object) | **REQUIRED.** Describes each model output and how to interpret it. | -| parameters | [Parameters Object](#params-object) | Mapping with names for the parameters and their values. Some models may take additional scalars, tuples, and other non-tensor inputs like text. | +| mlm:parameters | [Parameters Object](#params-object) | Mapping with names for the parameters and their values. 
Some models may take additional scalars, tuples, and other non-tensor inputs like text. |
+| mlm:parameters | [Parameters Object](#params-object) | Mapping with names for the parameters and their values. Some models may take additional scalars, tuples, and other non-tensor inputs like text. |
 
 In addition, fields from the following extensions must be imported in the item:
 - [Scientific Extension Specification][stac-ext-sci] to describe relevant publications.
@@ -72,9 +73,9 @@ In addition, fields from the following extensions must be imported in the item:
 #### Parameters Object
 
-| Field Name | Type | Description |
-|-----------------------------------|---------|--------------------------------------------------------------------------|
-| *parameter names depend on the model* | number `\|` string `\|` boolean `\|` array | The field number and names depend on the model as do the values. Values should be not be n-dimensional array inputs. If the model input can be represented as an n-dimensional array, it should instead be supplied as another model input object. |
+| Field Name | Type | Description | | | |
+|---------------------------------------|-----------|-------------|--------------|---------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| *parameter names depend on the model* | number `\ | ` string `\ | ` boolean `\ | ` array | The number of fields and their names depend on the model. Values should not be n-dimensional array inputs. If the model input can be represented as an n-dimensional array, it should instead be supplied as another model input object. |
 
 The parameters field can either be specified in the model input object if they are associated with a specific input or as an Item or Collection field if the parameters are supplied without relation to a specific model input. 
@@ -82,17 +83,19 @@ The parameters field can either be specified in the model input object if they a We use the [STAC 1.1 Bands Object](https://github.com/radiantearth/stac-spec/pull/1254) for representing bands information, including nodata value, data type, and common band names. Only bands used to train or fine tune the model should be included in this `bands` field. -A deviation from the [STAC 1.1 Bands Object](https://github.com/radiantearth/stac-spec/pull/1254) is that we do not include the [Statistics](stac-statistics) object at the band object level, but at the Model Input level. This is because in machine learning, we typically only need statistics for the dataset used to train the model in order to normalize any given bands input. +A deviation from the [STAC 1.1 Bands Object](https://github.com/radiantearth/stac-spec/pull/1254) is that we do not include the [Statistics](stac-statistics) object at the band object level, but at the Model Input level. This is because in machine learning, it is common to only need overall statistics for the dataset used to train the model to normalize all bands. [stac-statistics]: https://github.com/radiantearth/stac-spec/pull/1254/files#diff-2477b726f8c5d5d1c8b391be056db325e6918e78a24b414ccd757c7fbd574079R294 -#### Feature Array Object +#### Array Object + +| Field Name | Type | Description | +|------------|-----------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| shape | [integer] | **REQUIRED.** Shape of the input n-dimensional array ($N \times C \times H \times W$), including the batch size dimension. The batch size dimension must either be greater than 0 or -1 to indicate an unspecified batch dimension size. | +| dim_order | string | **REQUIRED.** How the above dimensions are ordered with the tensor. 
"bhw", "bchw", "bthw", "btchw" are valid orderings where b=batch, c=channel, t=time, h=height, w=width | +| dtype | string | **REQUIRED.** The data type of values in the n-dimensional array. Suggested to use [Numpy numerical types](https://numpy.org/devdocs/user/basics.types.html), omitting the numpy module, e.g. "float32" | -| Field Name | Type | Description | -|------------|-----------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| shape | [integer] | **REQUIRED.** Shape of the input n-dimensional feature array ($N \times C \times H \times W$), including the batch size dimension. The batch size dimension must either be greater than 0 or -1 to indicate an unspecified batch dimension size. | -| dim_order | string | **REQUIRED.** How the above dimensions are ordered with the tensor. "bhw", "bchw", "bthw", "btchw" are valid orderings where b=batch, c=channel, t=time, h=height, w=width | -| dtype | string | **REQUIRED.** The data type of values in the feature array. Suggested to use [Numpy numerical types](https://numpy.org/devdocs/user/basics.types.html), omitting the numpy module, e.g. "float32" | +Note: It is common in the machine learning, computer vision, and remote sensing communities to refer to rasters that are inputs to a model as arrays or tensors. Array Objects are distinct from the JSON array type used to represent lists of values. 
### Architecture Object @@ -109,14 +112,14 @@ A deviation from the [STAC 1.1 Bands Object](https://github.com/radiantearth/sta | Field Name | Type | Description | |-------------------------|---------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| framework | string | **REQUIRED.** Used framework (ex: PyTorch, TensorFlow). | -| version | string | **REQUIRED.** Framework version (some models require a specific version of the framework). | +| framework | string | **REQUIRED.** Framework used to train the model (ex: PyTorch, TensorFlow). | +| version | string | **REQUIRED.** Framework version (some models require a specific version of the framework to run). | | model_asset | [Asset Object](stac-asset) | **REQUIRED.** Asset object containing URI to the model file. | | source_code | [Asset Object](stac-asset) | **REQUIRED.** Source code description. Can describe a github repo, zip archive, etc. This description should reference the inference function, for example my_package.my_module.predict | | accelerator | [Accelerator Enum](#accelerator-enum) | **REQUIRED.** The intended accelerator that runs inference. | -| accelerator_constrained | boolean | **REQUIRED.** If the intended accelerator is the only accelerator that can run inference. If False, other accelerators, such as the amd64 (CPU), can run inference | +| accelerator_constrained | boolean | **REQUIRED.** True if the intended accelerator is the only accelerator that can run inference. False if other accelerators, such as amd64 (CPU), can run inference. | | hardware_summary | string | **REQUIRED.** A high level description of the number of accelerators, specific generation of accelerator, or other relevant inference details. | -| docker | [Container](#container) | **RECOMMENDED.** Information for the deployment of the model in a docker instance. 
| +| container | [Container](#container) | **RECOMMENDED.** Information for the deployment of the model in a docker instance. | | model_commit_hash | string | Hash value pointing to a specific version of the code. | | batch_size_suggestion | number | A suggested batch size for the accelerator and summarized hardware. | @@ -170,13 +173,13 @@ You can also use other base images. Pytorch and Tensorflow offer docker images f ### Model Output Object -| Field Name | Type | Description | -|--------------------------|---------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| task | [Task Enum](#task-enum) | **REQUIRED.** Specifies the Machine Learning task for which the output can be used for. | -| number_of_classes | integer | Number of classes. | -| result | [[Result Object](#result-object)] | The list of output array/tensor from the model. For example ($N \times H \times W$). Use -1 to indicate variable dimensions, like the batch dimension. | -| class_name_mapping | [Class Map Object](#class-map-object) | Mapping of the class name to an index representing the label in the model output. | -| post_processing_function | string | A url to the postprocessing function where normalization and rescaling takes place, and any other significant operations. 
Or, instead, the function code path, for example: my_package.my_module.my_processing_function | +| Field Name | Type | Description | +|--------------------------|-----------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| task | [Task Enum](#task-enum) | **REQUIRED.** Specifies the Machine Learning task for which the output can be used for. | +| number_of_classes | integer | Number of classes. | +| result | [[Result Array Object](#result-array-object)] | The list of output array/tensor from the model. For example ($N \times H \times W$). Use -1 to indicate variable dimensions, like the batch dimension. | +| class_name_mapping | [Class Map Object](#class-map-object) | Mapping of the class name to an index representing the label in the model output. | +| post_processing_function | string | A url to the postprocessing function where normalization and rescaling takes place, and any other significant operations. Or, instead, the function code path, for example: my_package.my_module.my_processing_function | While only `task` is a required field, all fields are recommended for supervised tasks that produce a fixed shape tensor and have output classes. `image-captioning`, `multi-modal`, and `generative` tasks may not return fixed shape tensors or classes. @@ -200,7 +203,7 @@ STAC Collections and Items employed with the model described by this extension. 
[stac-ext-label-props]: https://github.com/stac-extensions/label#item-properties -#### Result Object +#### Result Array Object | Field Name | Type | Description | |------------|-----------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| @@ -213,18 +216,18 @@ STAC Collections and Items employed with the model described by this extension. | Field Name | Type | Description | |-----------------------------------|---------|--------------------------------------------------------------------------| -| *class names depend on the model* | integer | There are N corresponding integer values corresponding to N class fieds. | +| *class names depend on the model* | integer | There are N corresponding integer values corresponding to N class fields. | The user can supply any number of fields for the classes of their model if the model produces a supervised classification result. | ## Relation types The following types should be used as applicable `rel` types in the -[Link Object](https://github.com/radiantearth/stac-spec/tree/master/item-spec/item-spec.md#link-object). +[Link Object](https://github.com/radiantearth/stac-spec/tree/master/item-spec/item-spec.md#link-object) of STAC Items describing Band Assets used with a model. -| Type | Description | -|----------------|---------------------------------------| -| fancy-rel-type | This link points to a fancy resource. | +| Type | Description | +|--------------|----------------------------------------------------------------------------------------------------------------------------| +| derived_from | This link points to _item.json or _collection.json. Replace with the unique mlm:name field's value. 
| ## Contributing From f72faaa4f8552c2d8ef6de07e1ae4a85ceb611d1 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Wed, 10 Jan 2024 12:09:17 -0800 Subject: [PATCH 020/112] update Changelog, more language edits --- CHANGELOG.md | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bb79cf4..c6f218d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,21 +9,25 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added - more [Task Enum](./README.md#task-enum) tasks - [accelerator](./README#accelerators) options in [Runtime Object](./README#runtime-object) -- batch_size and hardware suggestion -- ram_size_mb to specify model ram requirements during inference +- [Model Output Object](./README.md#model-output-object) +- batch_size and hardware summary +- [`disk_size`, `memory_size`](./README#architecture-object) +- [`hardware_summary`, `accelerator`, `accelerator_constrained`](./README#runtime-object) to specify hardware requirements for inference - added time to the Tensor object as an optional dim -- Use common metadata Asset Object to refer to Model Artifact and artifact metadata as a Collection level object +- Use common metadata [Asset Object](https://github.com/radiantearth/stac-spec/blob/master/collection-spec/collection-spec.md#asset-object) to refer to model asset and source code. +- flexible [class map object](./README.md#class-map-object) and [parameters object](./README.md#parameters-object) to handle aspects of models that vary substantially in number ### Changed - selected_bands > band_names, the same human readable names used in the common metadata band objects. -- replaced normalization:mean, etc. with statistics from STAC 1.1 common metadata +- replaced normalization:mean, etc. 
with [statistics](./README.md#bands-and-statistics) from STAC 1.1 common metadata - added pydantic models for internal schema objects in stac_model package and published to PYPI +- specified [rel_type](./README.md#relation-types) to be `derived_from` and specify how model item or collection json should be named ### Deprecated - ### Removed -- Data Object, replaced with [common metadata band object](https://github.com/radiantearth/stac-spec/blob/f9b3c59ba810541c9da70c5f8d39635f8cba7bcd/item-spec/common-metadata.md#bands) which also records data_type and nodata type +- Data Object, replaced with [Model Input Object](./README.md#model-input-object) that uses name field from the[common metadata band object](https://github.com/radiantearth/stac-spec/blob/f9b3c59ba810541c9da70c5f8d39635f8cba7bcd/item-spec/common-metadata.md#bands) which also records data_type and nodata type # TODO link release here From 1f1891b116a967cf9d125a1e9b4c656759f595e5 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Wed, 10 Jan 2024 12:12:02 -0800 Subject: [PATCH 021/112] update Changelog --- CHANGELOG.md | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c6f218d..0795779 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,21 +13,19 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - batch_size and hardware summary - [`disk_size`, `memory_size`](./README#architecture-object) - [`hardware_summary`, `accelerator`, `accelerator_constrained`](./README#runtime-object) to specify hardware requirements for inference -- added time to the Tensor object as an optional dim - Use common metadata [Asset Object](https://github.com/radiantearth/stac-spec/blob/master/collection-spec/collection-spec.md#asset-object) to refer to model asset and source code. 
- flexible [class map object](./README.md#class-map-object) and [parameters object](./README.md#parameters-object) to handle aspects of models that vary substantially in number ### Changed -- selected_bands > band_names, the same human readable names used in the common metadata band objects. - replaced normalization:mean, etc. with [statistics](./README.md#bands-and-statistics) from STAC 1.1 common metadata -- added pydantic models for internal schema objects in stac_model package and published to PYPI +- added `pydantic` models for internal schema objects in `stac_model` package and published to PYPI - specified [rel_type](./README.md#relation-types) to be `derived_from` and specify how model item or collection json should be named ### Deprecated - ### Removed -- Data Object, replaced with [Model Input Object](./README.md#model-input-object) that uses name field from the[common metadata band object](https://github.com/radiantearth/stac-spec/blob/f9b3c59ba810541c9da70c5f8d39635f8cba7bcd/item-spec/common-metadata.md#bands) which also records data_type and nodata type +- Data Object, replaced with [Model Input Object](./README.md#model-input-object) that uses the `name` field from the[common metadata band object](https://github.com/radiantearth/stac-spec/blob/f9b3c59ba810541c9da70c5f8d39635f8cba7bcd/item-spec/common-metadata.md#bands) which also records `data_type` and `nodata` type # TODO link release here From 71cac72b493a1a9fee3c4541594c7789bd32912a Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Wed, 10 Jan 2024 12:34:00 -0800 Subject: [PATCH 022/112] link lots of fields and objects to readme text within tables --- README.md | 78 +++++++++++++++++++++++++++---------------------------- 1 file changed, 39 insertions(+), 39 deletions(-) diff --git a/README.md b/README.md index 6153ab5..863263f 100644 --- a/README.md +++ b/README.md @@ -59,29 +59,29 @@ In addition, fields from the following extensions must be imported in the item: ### Model Input Object -| Field Name 
| Type | Description | | -|-------------------------|-------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------| -| name | string | **REQUIRED.** Informative name of the input variable. Example "RGB Time Series" | | -| bands | [string] | **REQUIRED.** The names of the raster bands used to train or fine-tune the model, which may be all or a subset of bands available in a STAC Item's [Band Object](#bands-and-statistics). | | -| input_array | [Array Object](#feature-array-object) | **REQUIRED.** The N-dimensional array object that describes the shape, dimension ordering, and data type. | | -| parameters | [Parameters Object](#params-object) | Mapping with names for the parameters and their values. Some models may take additional scalars, tuples, and other non-tensor inputs like text. | | -| norm_by_channel | boolean | Whether to normalize each channel by channel-wise statistics or to normalize by dataset statistics. If True, use an array of Statistics Objects that is ordered like the bands field. | | -| norm_type | string | Normalization method. Select one option from "min_max", "z_score", "max_norm", "mean_norm", "unit_variance", "none" | | -| rescale_type | string | High-level descriptor of the rescaling method to change image shape. Select one option from "crop", "pad", "interpolation", "none". If your rescaling method combines more than one of these operations, provide the name of the operation instead | | -| statistics | [Statistics Object](stac-statistics) `\ | ` [[Statistics Object](stac-statistics)] | Dataset statistics for the training dataset used to normalize the inputs. 
| -| pre_processing_function | string | A url to the preprocessing function where normalization and rescaling takes place, and any other significant operations. Or, instead, the function code path, for example: my_python_module_name:my_processing_function | | +| Field Name | Type | Description | | +|-------------------------|----------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---| +| name | string | **REQUIRED.** Informative name of the input variable. Example "RGB Time Series" | | +| bands | [string] | **REQUIRED.** The names of the raster bands used to train or fine-tune the model, which may be all or a subset of bands available in a STAC Item's [Band Object](#bands-and-statistics). | | +| input_array | [Array Object](#feature-array-object) | **REQUIRED.** The N-dimensional array object that describes the shape, dimension ordering, and data type. | | +| parameters | [Parameters Object](#params-object) | Mapping with names for the parameters and their values. Some models may take additional scalars, tuples, and other non-tensor inputs like text. | | +| norm_by_channel | boolean | Whether to normalize each channel by channel-wise statistics or to normalize by dataset statistics. If True, use an array of [Statistics Objects](#bands-and-statistics) that is ordered like the `bands` field in this object. | | +| norm_type | string | Normalization method. Select one option from "min_max", "z_score", "max_norm", "mean_norm", "unit_variance", "none" | | +| rescale_type | string | High-level descriptor of the rescaling method to change image shape. Select one option from "crop", "pad", "interpolation", "none". 
If your rescaling method combines more than one of these operations, provide the name of the operation instead | | +| statistics | [Statistics Object](stac-statistics) `\|` [[Statistics Object](stac-statistics)] | Dataset statistics for the training dataset used to normalize the inputs. | | +| pre_processing_function | string | A url to the preprocessing function where normalization and rescaling takes place, and any other significant operations. Or, instead, the function code path, for example: `my_python_module_name:my_processing_function` | | #### Parameters Object -| Field Name | Type | Description | | | | -|---------------------------------------|-----------|-------------|--------------|---------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| *parameter names depend on the model* | number `\ | ` string `\ | ` boolean `\ | ` array | The number of fields and their names depend on the model. Values should be not be n-dimensional array inputs. If the model input can be represented as an n-dimensional array, it should instead be supplied as another model input object. | +| Field Name | Type | Description | +|---------------------------------------|------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| *parameter names depend on the model* | number `\|` string `\|` boolean `\|` array | The number of fields and their names depend on the model. Values should not be n-dimensional array inputs. 
If the model input can be represented as an n-dimensional array, it should instead be supplied as another [model input object](#model-input-object). | -The parameters field can either be specified in the model input object if they are associated with a specific input or as an Item or Collection field if the parameters are supplied without relation to a specific model input. +The parameters field can either be specified in the [model input object](#model-input-object) if they are associated with a specific input or as an [Item or Collection](#item-properties-and-collection-fields) field if the parameters are supplied without relation to a specific model input. #### Bands and Statistics -We use the [STAC 1.1 Bands Object](https://github.com/radiantearth/stac-spec/pull/1254) for representing bands information, including nodata value, data type, and common band names. Only bands used to train or fine tune the model should be included in this `bands` field. +We use the [STAC 1.1 Bands Object](https://github.com/radiantearth/stac-spec/pull/1254) for representing bands information, including the nodata value, data type, and common band names. Only bands used to train or fine tune the model should be included in this `bands` field. A deviation from the [STAC 1.1 Bands Object](https://github.com/radiantearth/stac-spec/pull/1254) is that we do not include the [Statistics](stac-statistics) object at the band object level, but at the Model Input level. This is because in machine learning, it is common to only need overall statistics for the dataset used to train the model to normalize all bands. 
@@ -92,34 +92,34 @@ A deviation from the [STAC 1.1 Bands Object](https://github.com/radiantearth/sta | Field Name | Type | Description | |------------|-----------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | shape | [integer] | **REQUIRED.** Shape of the input n-dimensional array ($N \times C \times H \times W$), including the batch size dimension. The batch size dimension must either be greater than 0 or -1 to indicate an unspecified batch dimension size. | -| dim_order | string | **REQUIRED.** How the above dimensions are ordered with the tensor. "bhw", "bchw", "bthw", "btchw" are valid orderings where b=batch, c=channel, t=time, h=height, w=width | +| dim_order | string | **REQUIRED.** How the above dimensions are ordered within the `shape`. "bhw", "bchw", "bthw", "btchw" are valid orderings where b=batch, c=channel, t=time, h=height, w=width. | | dtype | string | **REQUIRED.** The data type of values in the n-dimensional array. Suggested to use [Numpy numerical types](https://numpy.org/devdocs/user/basics.types.html), omitting the numpy module, e.g. "float32" | Note: It is common in the machine learning, computer vision, and remote sensing communities to refer to rasters that are inputs to a model as arrays or tensors. Array Objects are distinct from the JSON array type used to represent lists of values. ### Architecture Object -| Field Name | Type | Description | -|-------------------|---------|-----------------------------------------------------------------------------------------------| -| name | string | **REQUIRED.** The name of the model architecture. For example, "ResNet-18" or "Random Forest" | -| file_size | integer | **REQUIRED.** The size on disk of the model artifact (bytes). 
| -| memory_size | integer | **REQUIRED.** The in-memory size of the model on the accelerator during inference (bytes). | -| summary | string | Summary of the layers, can be the output of `print(model)`. | -| pretrained_source | string | Indicates the source of the pretraining (ex: ImageNet). | -| total_parameters | integer | Total number of parameters. +| Field Name | Type | Description | +|-------------------|---------|-----------------------------------------------------------------------------------------------------------------| +| name | string | **REQUIRED.** The name of the model architecture. For example, "ResNet-18" or "Random Forest" | +| file_size | integer | **REQUIRED.** The size on disk of the model artifact (bytes). | +| memory_size | integer | **REQUIRED.** The in-memory size of the model on the [`accelerator`](#runtime-object) during inference (bytes). | +| summary | string | Summary of the layers, can be the output of `print(model)`. | +| pretrained_source | string | Indicates the source of the pretraining (ex: ImageNet). | +| total_parameters | integer | Total number of parameters. | ### Runtime Object | Field Name | Type | Description | |-------------------------|---------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | framework | string | **REQUIRED.** Framework used to train the model (ex: PyTorch, TensorFlow). | -| version | string | **REQUIRED.** Framework version (some models require a specific version of the framework to run). | +| version | string | **REQUIRED.** `framework` version (some models require a specific version of the `framework` to run). | | model_asset | [Asset Object](stac-asset) | **REQUIRED.** Asset object containing URI to the model file. | | source_code | [Asset Object](stac-asset) | **REQUIRED.** Source code description. 
Can describe a github repo, zip archive, etc. This description should reference the inference function, for example my_package.my_module.predict | -| accelerator | [Accelerator Enum](#accelerator-enum) | **REQUIRED.** The intended accelerator that runs inference. | -| accelerator_constrained | boolean | **REQUIRED.** True if the intended accelerator is the only accelerator that can run inference. False if other accelerators, such as amd64 (CPU), can run inference. | -| hardware_summary | string | **REQUIRED.** A high level description of the number of accelerators, specific generation of accelerator, or other relevant inference details. | -| container | [Container](#container) | **RECOMMENDED.** Information for the deployment of the model in a docker instance. | +| accelerator | [Accelerator Enum](#accelerator-enum) | **REQUIRED.** The intended computational hardware that runs inference. | +| accelerator_constrained | boolean | **REQUIRED.** True if the intended `accelerator` is the only `accelerator` that can run inference. False if other accelerators, such as amd64 (CPU), can run inference. | +| hardware_summary | string | **REQUIRED.** A high level description of the number of accelerators, specific generation of the `accelerator`, or other relevant inference details. | +| container | [Container](#container) | **RECOMMENDED.** Information to run the model in a container instance. | | model_commit_hash | string | Hash value pointing to a specific version of the code. | | batch_size_suggestion | number | A suggested batch size for the accelerator and summarized hardware. | @@ -173,13 +173,13 @@ You can also use other base images. 
Pytorch and Tensorflow offer docker images f ### Model Output Object -| Field Name | Type | Description | -|--------------------------|-----------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| task | [Task Enum](#task-enum) | **REQUIRED.** Specifies the Machine Learning task for which the output can be used for. | -| number_of_classes | integer | Number of classes. | -| result | [[Result Array Object](#result-array-object)] | The list of output array/tensor from the model. For example ($N \times H \times W$). Use -1 to indicate variable dimensions, like the batch dimension. | -| class_name_mapping | [Class Map Object](#class-map-object) | Mapping of the class name to an index representing the label in the model output. | -| post_processing_function | string | A url to the postprocessing function where normalization and rescaling takes place, and any other significant operations. Or, instead, the function code path, for example: my_package.my_module.my_processing_function | +| Field Name | Type | Description | +|--------------------------|-----------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| task | [Task Enum](#task-enum) | **REQUIRED.** Specifies the Machine Learning task for which the output can be used for. | +| number_of_classes | integer | Number of classes. | +| result | [[Result Array Object](#result-array-object)] | The list of output array/tensor from the model. For example ($N \times H \times W$). Use -1 to indicate variable dimensions, like the batch dimension. 
| +| class_name_mapping | [Class Map Object](#class-map-object) | Mapping of the class name to an index representing the label in the model output. | +| post_processing_function | string | A url to the postprocessing function where normalization, rescaling, and other operations take place. Or, instead, the function code path, for example: `my_package.my_module.my_processing_function` | While only `task` is a required field, all fields are recommended for supervised tasks that produce a fixed shape tensor and have output classes. `image-captioning`, `multi-modal`, and `generative` tasks may not return fixed shape tensors or classes. @@ -208,7 +208,7 @@ STAC Collections and Items employed with the model described by this extension. | Field Name | Type | Description | |------------|-----------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | shape | [integer] | **REQUIRED.** Shape of the n-dimensional result array ($N \times H \times W$), possibly including a batch size dimension. The batch size dimension must either be greater than 0 or -1 to indicate an unspecified batch dimension size. | -| dim_names | [string] | **REQUIRED.** The names of the above dimensions of the result array. | +| dim_names | [string] | **REQUIRED.** The names of the above dimensions of the result array, ordered the same as this object's `shape` field. | +| dtype | string | **REQUIRED.** The data type of values in the array. Suggested to use [Numpy numerical types](https://numpy.org/devdocs/user/basics.types.html), omitting the numpy module, e.g. 
"float32" | @@ -227,7 +227,7 @@ The following types should be used as applicable `rel` types in the | Type | Description | |--------------|----------------------------------------------------------------------------------------------------------------------------| -| derived_from | This link points to _item.json or _collection.json. Replace with the unique mlm:name field's value. | +| derived_from | This link points to _item.json or _collection.json. Replace with the unique [`mlm:name`](#item-properties-and-collection-fields) field's value. | ## Contributing From 03e334c0a10a7c898ae2eca742419eb0efcd01b4 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Thu, 11 Jan 2024 16:15:31 -0800 Subject: [PATCH 023/112] new precommit, align with draft 2 of spec --- stac_model/.pre-commit-config.yaml | 6 ++- stac_model/example.json | 87 ------------------------------ stac_model/pyproject.toml | 4 +- stac_model/stac_model/__main__.py | 18 +++---- stac_model/stac_model/input.py | 21 ++++---- stac_model/stac_model/output.py | 13 +++-- stac_model/stac_model/runtime.py | 26 ++++++--- stac_model/stac_model/schema.py | 24 +++++---- 8 files changed, 66 insertions(+), 133 deletions(-) delete mode 100644 stac_model/example.json diff --git a/stac_model/.pre-commit-config.yaml b/stac_model/.pre-commit-config.yaml index a905288..431dbca 100644 --- a/stac_model/.pre-commit-config.yaml +++ b/stac_model/.pre-commit-config.yaml @@ -9,5 +9,7 @@ repos: hooks: - id: check-yaml - id: end-of-file-fixer - exclude: LICENCE -# UPDATEME with additional hooks + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: '' # Use the latest version of ruff-pre-commit + hooks: + - id: ruff diff --git a/stac_model/example.json b/stac_model/example.json deleted file mode 100644 index 1b54835..0000000 --- a/stac_model/example.json +++ /dev/null @@ -1,87 +0,0 @@ -{ - "mlm_input": { - "name": "13 Band Sentinel-2 Batch", - "bands": [], - "input_array": { - "shape": [ - -1, - 13, - 64, - 64 - ], - "dim_order": 
"bchw", - "dtype": "float32" - }, - "norm_type": "z_score", - "rescale_type": "none", - "norm_by_channel": true, - "statistics": { - "mean": [ - 1354.40546513, - 1118.24399958, - 1042.92983953, - 947.62620298, - 1199.47283961, - 1999.79090914, - 2369.22292565, - 2296.82608323, - 732.08340178, - 12.11327804, - 1819.01027855, - 1118.92391149, - 2594.14080798 - ], - "stddev": [ - 245.71762908, - 333.00778264, - 395.09249139, - 593.75055589, - 566.4170017, - 861.18399006, - 1086.63139075, - 1117.98170791, - 404.91978886, - 4.77584468, - 1002.58768311, - 761.30323499, - 1231.58581042 - ] - }, - "pre_processing_function": "https://github.com/microsoft/torchgeo/blob/545abe8326efc2848feae69d0212a15faba3eb00/torchgeo/datamodules/eurosat.py" - }, - "mlm_architecture": { - "name": "ResNet-18", - "summary": "Sourced from torchgeo python library, identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO", - "pretrained": true, - "total_parameters": 11700000 - }, - "mlm_runtime": { - "framework": "torch", - "version": "2.1.2+cu121", - "asset": { - "href": "." 
- }, - "source_code_url": "https://github.com/huggingface/pytorch-image-models/blob/b5a4fa9c3be6ac732807db7e87d176f5e4fc06f1/timm/models/resnet.py#L362", - "handler": "torchgeo.models.resnet.ResNet18" - }, - "mlm_output": { - "task": "classification", - "number_of_classes": 10, - "output_shape": [ - -1, - 10 - ], - "class_name_mapping": { - "Annual Crop": 0, - "Forest": 1, - "Herbaceous Vegetation": 2, - "Highway": 3, - "Industrial Buildings": 4, - "Pasture": 5, - "Permanent Crop": 6, - "Residential Buildings": 7, - "River": 8, - "SeaLake": 9 - } - } -} diff --git a/stac_model/pyproject.toml b/stac_model/pyproject.toml index b984f85..b261b5f 100644 --- a/stac_model/pyproject.toml +++ b/stac_model/pyproject.toml @@ -7,7 +7,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "stac-model" -version = "0.1.0" +version = "0.1.1.alpha0" description = "A PydanticV2 validation and serialization libary for the STAC ML Model Extension" readme = "README.md" authors = ["Ryan Avery "] @@ -49,7 +49,7 @@ rich = "^13.7.0" pydantic = "^2.5.0" # bug in post 2.3 https://github.com/pydantic/pydantic/issues/7720 pydantic-core = "~2" numpy = "^1.26.2" -fastapi="^0.108.0" +# fastapi="^0.108.0" [tool.poetry.group.dev.dependencies] diff --git a/stac_model/stac_model/__main__.py b/stac_model/stac_model/__main__.py index 926882c..e5e312d 100644 --- a/stac_model/stac_model/__main__.py +++ b/stac_model/stac_model/__main__.py @@ -2,7 +2,7 @@ from rich.console import Console from stac_model import __version__ -from stac_model.schema import * +from stac_model.schema import InputArray, Statistics, ModelInput, Architecture, Runtime, Asset, ResultArray, ModelOutput, MLModel app = typer.Typer( @@ -34,21 +34,21 @@ def main( """Generate example spec.""" input_array = InputArray( - dtype="float32", shape=[-1, 13, 64, 64], dim_order="bchw" + shape=[-1, 13, 64, 64], dim_order="bchw", dtype="float32" ) - band_list = [] - bands = [Band(name=b, description = f"Band {b}", nodata=-9999, 
data_type="float32", unit="reflectance") for b in band_list] + band_names = ["B01", "B02", "B03", "B04", "B05", "B06", "B07", "B08", "B8A", "B09", "B10", "B11", "B12"] stats = Statistics(mean=[1354.40546513, 1118.24399958, 1042.92983953, 947.62620298, 1199.47283961, 1999.79090914, 2369.22292565, 2296.82608323, 732.08340178, 12.11327804, 1819.01027855, 1118.92391149, 2594.14080798], stddev= [245.71762908, 333.00778264, 395.09249139, 593.75055589, 566.4170017, 861.18399006, 1086.63139075, 1117.98170791, 404.91978886, 4.77584468, 1002.58768311, 761.30323499, 1231.58581042]) - mlm_input = Input(name= "13 Band Sentinel-2 Batch", bands=bands, input_array=input_array, norm_by_channel=True, norm_type="z_score", rescale_type="none", statistics=stats, pre_processing_function = "https://github.com/microsoft/torchgeo/blob/545abe8326efc2848feae69d0212a15faba3eb00/torchgeo/datamodules/eurosat.py") - mlm_architecture = Architecture(name = "ResNet-18", total_parameters= 11_700_000, summary= "Sourced from torchgeo python library, identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO", pretrained= True) + mlm_input = ModelInput(name= "13 Band Sentinel-2 Batch", bands=band_names, input_array=input_array, norm_by_channel=True, norm_type="z_score", rescale_type="none", statistics=stats, pre_processing_function = "https://github.com/microsoft/torchgeo/blob/545abe8326efc2848feae69d0212a15faba3eb00/torchgeo/datamodules/eurosat.py") + mlm_architecture = Architecture(name = "ResNet-18", file_size=1, memory_size=1, summary= "Sourced from torchgeo python library, identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO", pretrained_source="EuroSat Sentinel-2", total_parameters= 11_700_000) mlm_runtime = Runtime(framework= "torch", version= "2.1.2+cu121", asset= Asset(href= "https://huggingface.co/torchgeo/resnet18_sentinel2_all_moco/resolve/main/resnet18_sentinel2_all_moco-59bfdff9.pth"), - handler= "torchgeo.models.resnet.ResNet18", source_code_url= 
"https://github.com/huggingface/pytorch-image-models/blob/b5a4fa9c3be6ac732807db7e87d176f5e4fc06f1/timm/models/resnet.py#L362") - mlm_output = Output(task= "classification", number_of_classes= 10, output_shape=[-1, 10], class_name_mapping= { + source_code= Asset(href="https://github.com/microsoft/torchgeo/blob/61efd2e2c4df7ebe3bd03002ebbaeaa3cfe9885a/torchgeo/models/resnet.py#L207"), accelerator="cuda", accelerator_constrained=False, hardware_summary="Unknown") + result_array = ResultArray(shape=[-1, 10], dim_names=["batch", "class"], dtype="float32") + mlm_output = ModelOutput(task= "classification", number_of_classes= 10, output_shape=[-1, 10], result_array=result_array, class_name_mapping= { "Annual Crop": 0, "Forest": 1, "Herbaceous Vegetation": 2, @@ -60,7 +60,7 @@ def main( "River": 8, "SeaLake": 9, }) - ml_model_meta = MLModel(mlm_input=mlm_input, mlm_architecture=mlm_architecture, mlm_runtime=mlm_runtime, mlm_output=mlm_output) + ml_model_meta = MLModel(mlm_name="Resnet-18 Sentinel-2 ALL MOCO", mlm_input=[mlm_input], mlm_architecture=mlm_architecture, mlm_runtime=mlm_runtime, mlm_output=mlm_output) json_str = ml_model_meta.model_dump_json(indent=2, exclude_none=True) with open("example.json", "w") as file: file.write(json_str) diff --git a/stac_model/stac_model/input.py b/stac_model/stac_model/input.py index 5f74896..2d477a9 100644 --- a/stac_model/stac_model/input.py +++ b/stac_model/stac_model/input.py @@ -1,4 +1,4 @@ -from typing import List, Optional, Literal, Dict, Literal, Union +from typing import List, Optional, Dict, Literal, Union from pydantic import ( BaseModel, Field, @@ -19,21 +19,18 @@ class Statistics(BaseModel): class Band(BaseModel): name: str - description: str + description: Optional[str] = None nodata: float | int | str data_type: str unit: Optional[str] = None -class Input(BaseModel): +class ModelInput(BaseModel): name: str - bands: List[Band] + bands: List[str] input_array: InputArray - norm_type: Literal["min_max", "z_score", 
"max_norm", "mean_norm", "unit_variance", "none"] - rescale_type: Literal["crop", "pad", "interpolation", "none"] - norm_by_channel: bool - params: Optional[ - Dict[str, int | float | str] - ] = None - scaling_factor: Optional[float] = None - statistics: Optional[Statistics] = None + parameters: Dict[str, Union[int, str, bool, List[Union[int, str, bool]]]] = None + norm_by_channel: bool = None + norm_type: Literal["min_max", "z_score", "max_norm", "mean_norm", "unit_variance", "none"] = None + rescale_type: Literal["crop", "pad", "interpolation", "none"] = None + statistics: Optional[Union[Statistics, List[Statistics]]] = None pre_processing_function: Optional[str | AnyUrl] = None diff --git a/stac_model/stac_model/output.py b/stac_model/stac_model/output.py index af67bef..160dd13 100644 --- a/stac_model/stac_model/output.py +++ b/stac_model/stac_model/output.py @@ -1,4 +1,4 @@ -from pydantic import BaseModel +from pydantic import BaseModel, Field from typing import List, Dict, Union, Optional from enum import Enum class TaskEnum(str, Enum): @@ -21,9 +21,14 @@ def label_id_to_class(self) -> Dict[int, str]: # Reverse the mapping return {v: k for k, v in self.class_to_label_id.items()} -class Output(BaseModel): +class ResultArray(BaseModel): + shape: List[Union[int,float]] + dim_names: List[str] + dtype: str = Field(..., pattern="^(uint8|uint16|int16|int32|float16|float32|float64)$") + +class ModelOutput(BaseModel): task: TaskEnum - number_of_classes: int - output_shape: List[Union[int,float]] + number_of_classes: int = None + result_array: ResultArray = None class_name_mapping: Optional[Dict[str, int]] = None post_processing_function: Optional[str] = None diff --git a/stac_model/stac_model/runtime.py b/stac_model/stac_model/runtime.py index e11249d..41008ca 100644 --- a/stac_model/stac_model/runtime.py +++ b/stac_model/stac_model/runtime.py @@ -1,6 +1,7 @@ from .paths import S3Path -from pydantic import BaseModel, FilePath, AnyUrl, field_validator +from pydantic 
import BaseModel, FilePath, field_validator from typing import Optional, List +from enum import Enum class Asset(BaseModel): """Information about the model location and other additional file locations. Follows the Asset Object spec: https://github.com/radiantearth/stac-spec/blob/master/collection-spec/collection-spec.md#asset-object @@ -30,7 +31,7 @@ def check_path_type(cls, v): ) return v -class ContainerInfo(BaseModel): +class Container(BaseModel): container_file: str image_name: str tag: str @@ -38,13 +39,26 @@ class ContainerInfo(BaseModel): run: str accelerator: bool +class AcceleratorEnum(str, Enum): + amd64 = "amd64" + cuda = "cuda" + xla = "xla" + amd_rocm = "amd-rocm" + intel_ipex_cpu = "intel-ipex-cpu" + intel_ipex_gpu = "intel-ipex-gpu" + macos_arm = "macos-arm" + + def __str__(self): + return self.value + class Runtime(BaseModel): framework: str version: str asset: Asset - source_code_url: str - handler: Optional[str] = None + source_code: Asset + accelerator: AcceleratorEnum + accelerator_constrained: bool + hardware_summary: str + container: Optional[Container] = None commit_hash: Optional[str] = None - container: Optional[ContainerInfo] = None batch_size_suggestion: Optional[int] = None - hardware_suggestion: Optional[str | AnyUrl] = None diff --git a/stac_model/stac_model/schema.py b/stac_model/stac_model/schema.py index 3017921..d811509 100644 --- a/stac_model/stac_model/schema.py +++ b/stac_model/stac_model/schema.py @@ -1,21 +1,23 @@ from pydantic import BaseModel -from typing import Optional -from .input import Input, InputArray, Band, Statistics -from .output import Output, ClassMap -from .runtime import Runtime, Asset, ContainerInfo +from .input import ModelInput, InputArray, Band, Statistics +from .output import ModelOutput, ClassMap, ResultArray +from .runtime import Runtime, Asset, Container +from typing import List, Optional, Dict, Union class Architecture(BaseModel): name: str - summary: str - pretrained: bool + file_size: int + 
memory_size: int + summary: str = None + pretrained_source: str = None total_parameters: Optional[int] = None - on_disk_size_mb: Optional[float] = None - ram_size_mb: Optional[float] = None class MLModel(BaseModel): - mlm_input: Input + mlm_name: str + mlm_input: List[ModelInput] mlm_architecture: Architecture mlm_runtime: Runtime - mlm_output: Output + mlm_output: ModelOutput + mlm_parameters: Dict[str, Union[int, str, bool, List[Union[int, str, bool]]]] = None -__all__ = ["MLModel", "Input", "InputArray", "Band", "Statistics", "Output", "Asset", "ClassMap", "Runtime", "ContainerInfo", "Asset", "Architecture"] +__all__ = ["MLModel", "ModelInput", "InputArray", "Band", "Statistics", "ModelOutput", "Asset", "ClassMap", "ResultArray", "Runtime", "Container", "Asset", "Architecture"] From 1e58c02bc5a8d06dce6d58ffe29db3e143352b30 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Thu, 11 Jan 2024 16:16:13 -0800 Subject: [PATCH 024/112] version update --- stac_model/pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stac_model/pyproject.toml b/stac_model/pyproject.toml index b261b5f..de69001 100644 --- a/stac_model/pyproject.toml +++ b/stac_model/pyproject.toml @@ -7,7 +7,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "stac-model" -version = "0.1.1.alpha0" +version = "0.1.1.alpha1" description = "A PydanticV2 validation and serialization libary for the STAC ML Model Extension" readme = "README.md" authors = ["Ryan Avery "] From f379125bac26cf38e40cba906d33445b98641489 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Thu, 11 Jan 2024 16:17:30 -0800 Subject: [PATCH 025/112] precommit version update --- stac_model/.pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stac_model/.pre-commit-config.yaml b/stac_model/.pre-commit-config.yaml index 431dbca..19283ea 100644 --- a/stac_model/.pre-commit-config.yaml +++ b/stac_model/.pre-commit-config.yaml @@ -5,11 +5,11 @@ default_stages: 
[commit, push] repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v2.5.0 + rev: v4.5.0 hooks: - id: check-yaml - id: end-of-file-fixer - repo: https://github.com/astral-sh/ruff-pre-commit - rev: '' # Use the latest version of ruff-pre-commit + rev: 'v0.1.12' # Use the latest version of ruff-pre-commit hooks: - id: ruff From cd628c1da3d1c7ada53d205e0957a2be82671519 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Wed, 14 Feb 2024 10:26:11 -0800 Subject: [PATCH 026/112] use classification extension instead of custom class map object --- README.md | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 863263f..a2ed749 100644 --- a/README.md +++ b/README.md @@ -178,7 +178,7 @@ You can also use other base images. Pytorch and Tensorflow offer docker images f | task | [Task Enum](#task-enum) | **REQUIRED.** Specifies the Machine Learning task for which the output can be used for. | | number_of_classes | integer | Number of classes. | | result | [[Result Array Object](#result-array-object)] | The list of output array/tensor from the model. For example ($N \times H \times W$). Use -1 to indicate variable dimensions, like the batch dimension. | -| class_name_mapping | [Class Map Object](#class-map-object) | Mapping of the class name to an index representing the label in the model output. | +| classification:classes | [Class Object](#class-object) | A list of class objects adhering to the Classification extension. | | post_processing_function | string | A url to the postprocessing function where normalization, rescaling, and other operations take place.. Or, instead, the function code path, for example: `my_package.my_module.my_processing_function` | While only `task` is a required field, all fields are recommended for supervised tasks that produce a fixed shape tensor and have output classes. @@ -212,13 +212,9 @@ STAC Collections and Items employed with the model described by this extension. 
| dtype | string | **REQUIRED.** The data type of values in the array. Suggested to use [Numpy numerical types](https://numpy.org/devdocs/user/basics.types.html), omitting the numpy module, e.g. "float32" | -#### Class Map Object +#### Class Object -| Field Name | Type | Description | -|-----------------------------------|---------|--------------------------------------------------------------------------| -| *class names depend on the model* | integer | There are N corresponding integer values corresponding to N class fields. | - -The user can supply any number of fields for the classes of their model if the model produces a supervised classification result. | +See the documentation for the [Class Object](https://github.com/stac-extensions/classification?tab=readme-ov-file#class-object). We don't use the Bit Field Object since inputs and outputs to machine learning models don't typically use bit fields. ## Relation types From 36028001ae1fded8a28af561142efc2549beed95 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Wed, 14 Feb 2024 17:22:28 -0800 Subject: [PATCH 027/112] flatten architecture object into top level fields, use classification extension --- README.md | 37 +++++++++++++++---------------------- 1 file changed, 15 insertions(+), 22 deletions(-) diff --git a/README.md b/README.md index a2ed749..f21396f 100644 --- a/README.md +++ b/README.md @@ -40,15 +40,20 @@ Check the original technical report for an earlier version of the Model Extensio - [Changelog](./CHANGELOG.md) ## Item Properties and Collection Fields - -| Field Name | Type | Description | -|------------------|---------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------| -| mlm:name | string | **REQUIRED.** A unique name for the model. Should be distinct from the name of the architecture it is based on, or the name(s) of the input(s). 
| -| mlm:input | [[Model Input Object](#model-input-object)] | **REQUIRED.** Describes the transformation between the EO data and the model input. | -| mlm:architecture | [Architecture Object](#architecture-object) | **REQUIRED.** Describes the model architecture. | -| mlm:runtime | [Runtime Object](#runtime-object) | **REQUIRED.** Describes the runtime environments to run the model (inference). | -| mlm:output | [Model Output Object](#model-output-object) | **REQUIRED.** Describes each model output and how to interpret it. | -| mlm:parameters | [Parameters Object](#params-object) | Mapping with names for the parameters and their values. Some models may take additional scalars, tuples, and other non-tensor inputs like text. | +| Field Name | Type | Description | +|-----------------------|-----------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| mlm:name | string | **REQUIRED.** A unique name for the model. This should include but be distinct from simply naming the model architecture. If there is a publication or other published work related to the model, use the official name of the model. | +| mlm:file_size | integer | **REQUIRED.** The size on disk of the model artifact (bytes). | +| mlm:memory_size | integer | **REQUIRED.** The in-memory size of the model on the accelerator during inference (bytes). | +| mlm:framework | string | **REQUIRED.** Framework used to train the model (ex: PyTorch, TensorFlow). | +| mlm:framework_version | string | **REQUIRED.** The `framework` library version. Some models require a specific version of the machine learning `framework` to run. 
|
+| mlm:input             | [[Model Input Object](#model-input-object)]   | **REQUIRED.** Describes the transformation between the EO data and the model input.                                                                                                                                                                                                                                                                    |
+| mlm:output            | [[Model Output Object](#model-output-object)] | **REQUIRED.** Describes each model output and how to interpret it.                                                                                                                                                                                                                                                                                     |
+| mlm:runtime           | [[Runtime Object](#runtime-object)]           | **REQUIRED.** Describes the runtime environment(s) to run inference with the model asset(s).                                                                                                                                                                                                                                                           |
+| mlm:total_parameters  | integer                                       | Total number of model parameters, including trainable and non-trainable parameters.                                                                                                                                                                                                                                                                    |
+| mlm:pretrained_source | string                                        | The source of the pretraining. Can refer to popular pretraining datasets by name (e.g. ImageNet) or less known datasets by URL and description.                                                                                                                                                                                                         |
+| mlm:summary           | string                                        | Text summary of the model and its purpose.                                                                                                                                                                                                                                                                                                             |
+| mlm:parameters        | [Parameters Object](#params-object)           | Mapping with names for the parameters and their values. Some models may take additional scalars, tuples, and other non-tensor inputs like text during inference (Segment Anything). The field should be specified here if parameters apply to all Model Input Objects. If each Model Input Object has parameters, specify parameters in that object.    |
 
 In addition, fields from the following extensions must be imported in the item:
 - [Scientific Extension Specification][stac-ext-sci] to describe relevant publications.
@@ -75,7 +80,7 @@ In addition, fields from the following extensions must be imported in the item: | Field Name | Type | Description | |---------------------------------------|------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| *parameter names depend on the model* | number `\|` string `\|` boolean `\|` array | The number of fields and their names depend on the model. Values should be not be n-dimensional array inputs. If the model input can be represented as an n-dimensional array, it should instead be supplied as another [model input object](#model-input-object). | +| *parameter names depend on the model* | number `\|` string `\|` boolean `\|` array | The number of fields and their names depend on the model. Values should not be n-dimensional array inputs. If the model input can be represented as an n-dimensional array, it should instead be supplied as another [model input object](#model-input-object). | The parameters field can either be specified in the [model input object](#model-input-object) if they are associated with a specific input or as an [Item or Collection](#item-properties-and-collection-fields) field if the parameters are supplied without relation to a specific model input. @@ -97,23 +102,11 @@ A deviation from the [STAC 1.1 Bands Object](https://github.com/radiantearth/sta Note: It is common in the machine learning, computer vision, and remote sensing communities to refer to rasters that are inputs to a model as arrays or tensors. Array Objects are distinct from the JSON array type used to represent lists of values. 
-### Architecture Object - -| Field Name | Type | Description | -|-------------------|---------|-----------------------------------------------------------------------------------------------------------------| -| name | string | **REQUIRED.** The name of the model architecture. For example, "ResNet-18" or "Random Forest" | -| file_size | integer | **REQUIRED.** The size on disk of the model artifact (bytes). | -| memory_size | integer | **REQUIRED.** The in-memory size of the model on the [`accelerator`](#runtime-object) during inference (bytes). | -| summary | string | Summary of the layers, can be the output of `print(model)`. | -| pretrained_source | string | Indicates the source of the pretraining (ex: ImageNet). | -| total_parameters | integer | Total number of parameters. | ### Runtime Object | Field Name | Type | Description | |-------------------------|---------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| framework | string | **REQUIRED.** Framework used to train the model (ex: PyTorch, TensorFlow). | -| version | string | **REQUIRED.** `framework` version (some models require a specific version of the `framework` to run). | | model_asset | [Asset Object](stac-asset) | **REQUIRED.** Asset object containing URI to the model file. | | source_code | [Asset Object](stac-asset) | **REQUIRED.** Source code description. Can describe a github repo, zip archive, etc. This description should reference the inference function, for example my_package.my_module.predict | | accelerator | [Accelerator Enum](#accelerator-enum) | **REQUIRED.** The intended computational hardware that runs inference. 
| From c0946d1a4d159268f2581b72d3e71a4d8a87e931 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Wed, 14 Feb 2024 17:40:03 -0800 Subject: [PATCH 028/112] add best practices doc referencing processing extension --- best-practices.md | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 best-practices.md diff --git a/best-practices.md b/best-practices.md new file mode 100644 index 0000000..59d6d3f --- /dev/null +++ b/best-practices.md @@ -0,0 +1,15 @@ +# ML Model Extension Best Practices + +This document makes a number of recommendations for creating real world ML Model Extensions. None of them are required to meet the core specification, but following these practices will improve the documentation of your model and make life easier for client tooling and users. They come about from practical experience of implementors and introduce a bit more 'constraint' for those who are creating STAC objects representing their models or creating tools to work with STAC. + +## Recommended Extensions to Compose with the ML Model Extension + +### Processing Extension + +We recommend using the `processing:lineage` and `processing:level` fields from the [Processing Extension](https://github.com/stac-extensions/processing) to make it clear how [Model Input Objects](./README.md#model-input-object) are processed. + +For example: + +TODO supply example + +TODO provide other suggestions on extensions to compose with this one. STAC ML AOI, STAC Label, ... From 2ba7483b049644b909cc8aece91a882d1c7a9a60 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Wed, 14 Feb 2024 17:43:46 -0800 Subject: [PATCH 029/112] refer to best practices in readme --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index f21396f..91b4f7f 100644 --- a/README.md +++ b/README.md @@ -27,7 +27,9 @@ Specifically, this extension records the following information to make ML models 1. 
An optional, flexible description of the runtime environment to be able to run the model 1. Scientific references -The MLM specification is biased towards supervised ML models the produce classifications. However, fields that relate to supervised ML are optional and users can use the fields they need for different tasks. +The MLM specification is biased towards providing metadata fields for supervised machine learning models. However, fields that relate to supervised ML are optional and users can use the fields they need for different tasks. + +See [Best Practices](./best-practices.md) for guidance on what other extensions to use for documenting models with this extension. Check the original technical report for an earlier version of the Model Extension [here](https://github.com/crim-ca/CCCOT03/raw/main/CCCOT03_Rapport%20Final_FINAL_EN.pdf) for more details. From d4c8f3f026cf533875486d99ba5fdaf91055f9ae Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Wed, 14 Feb 2024 17:52:18 -0800 Subject: [PATCH 030/112] add processing ex --- best-practices.md | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/best-practices.md b/best-practices.md index 59d6d3f..858a49d 100644 --- a/best-practices.md +++ b/best-practices.md @@ -6,10 +6,17 @@ This document makes a number of recommendations for creating real world ML Model ### Processing Extension -We recommend using the `processing:lineage` and `processing:level` fields from the [Processing Extension](https://github.com/stac-extensions/processing) to make it clear how [Model Input Objects](./README.md#model-input-object) are processed. +We recommend using at least the `processing:lineage` and `processing:level` fields from the [Processing Extension](https://github.com/stac-extensions/processing) to make it clear how [Model Input Objects](./README.md#model-input-object) are processed by the data provider prior to an inference preprocessing pipeline. 
This can help users locate the correct version of the dataset used during model inference or help them reproduce the data processing pipeline. For example: -TODO supply example +``` +"processing:lineage": "GRD Post Processing", +"processing:level": "L1C", +"processing:facility": "Copernicus S1 Core Ground Segment - DPA", +"processing:software": { + "Sentinel-1 IPF": "002.71" +} +``` TODO provide other suggestions on extensions to compose with this one. STAC ML AOI, STAC Label, ... From d7d99be4e836db32b002f2ad66172edf71d18eeb Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Thu, 15 Feb 2024 09:59:43 -0800 Subject: [PATCH 031/112] make task enum searchable, add to top level, keep in output object --- README.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 91b4f7f..f127f1d 100644 --- a/README.md +++ b/README.md @@ -42,13 +42,15 @@ Check the original technical report for an earlier version of the Model Extensio - [Changelog](./CHANGELOG.md) ## Item Properties and Collection Fields + | Field Name | Type | Description | |-----------------------|-----------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | mlm:name | string | **REQUIRED.** A unique name for the model. This should include but be distinct from simply naming the model architecture. If there is a publication or other published work related to the model, use the official name of the model. | -| mlm:file_size | integer | **REQUIRED.** The size on disk of the model artifact (bytes). | -| mlm:memory_size | integer | **REQUIRED.** The in-memory size of the model on the accelerator during inference (bytes). 
|
+| mlm:task              | [Task Enum](#task-enum)                       | **REQUIRED.** Specifies the primary Machine Learning task for which the output can be used. If there are multi-modal outputs, specify the primary task and specify each task in the [Model Output Object](#model-output-object).                                                                                                                        |
 | mlm:framework         | string                                        | **REQUIRED.** Framework used to train the model (ex: PyTorch, TensorFlow).                                                                                                                                                                                                                                                                             |
 | mlm:framework_version | string                                        | **REQUIRED.** The `framework` library version. Some models require a specific version of the machine learning `framework` to run.                                                                                                                                                                                                                      |
+| mlm:file_size         | integer                                       | **REQUIRED.** The size on disk of the model artifact (bytes).                                                                                                                                                                                                                                                                                          |
+| mlm:memory_size       | integer                                       | **REQUIRED.** The in-memory size of the model on the accelerator during inference (bytes).                                                                                                                                                                                                                                                             |
 | mlm:input             | [[Model Input Object](#model-input-object)]   | **REQUIRED.** Describes the transformation between the EO data and the model input.                                                                                                                                                                                                                                                                    |
 | mlm:output            | [[Model Output Object](#model-output-object)] | **REQUIRED.** Describes each model output and how to interpret it.                                                                                                                                                                                                                                                                                     |
 | mlm:runtime           | [[Runtime Object](#runtime-object)]           | **REQUIRED.** Describes the runtime environment(s) to run inference with the model asset(s). 
| From 4310971e1c45325e315312ea51672fb737304a20 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Thu, 15 Feb 2024 10:54:24 -0800 Subject: [PATCH 032/112] update Model Input object to account for normalization with clipping and specify that inference data type is required --- README.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index f127f1d..19e2d14 100644 --- a/README.md +++ b/README.md @@ -68,16 +68,19 @@ In addition, fields from the following extensions must be imported in the item: ### Model Input Object + | Field Name | Type | Description | | |-------------------------|----------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---| | name | string | **REQUIRED.** Informative name of the input variable. Example "RGB Time Series" | | | bands | [string] | **REQUIRED.** The names of the raster bands used to train or fine-tune the model, which may be all or a subset of bands available in a STAC Item's [Band Object](#bands-and-statistics). | | | input_array | [Array Object](#feature-array-object) | **REQUIRED.** The N-dimensional array object that describes the shape, dimension ordering, and data type. | | +| inference_data_type | string | **REQUIRED.** The required or suggested data type of the model input after all preprocessing has been applied and model inference is run. See the list of [common metadata data types](https://github.com/radiantearth/stac-spec/blob/f9b3c59ba810541c9da70c5f8d39635f8cba7bcd/item-spec/common-metadata.md#data-types). | | | parameters | [Parameters Object](#params-object) | Mapping with names for the parameters and their values. Some models may take additional scalars, tuples, and other non-tensor inputs like text. 
| | | norm_by_channel | boolean | Whether to normalize each channel by channel-wise statistics or to normalize by dataset statistics. If True, use an array of [Statistics Objects](#bands-and-statistics) that is ordered like the `bands` field in this object. | | -| norm_type | string | Normalization method. Select one option from "min_max", "z_score", "max_norm", "mean_norm", "unit_variance", "none" | | +| norm_type | string | Normalization method. Select one option from "min_max", "z_score", "max_norm", "mean_norm", "unit_variance", "norm_with_clip", "none" | | | rescale_type | string | High-level descriptor of the rescaling method to change image shape. Select one option from "crop", "pad", "interpolation", "none". If your rescaling method combines more than one of these operations, provide the name of the operation instead | | | statistics | [Statistics Object](stac-statistics) `\|` [[Statistics Object](stac-statistics)] | Dataset statistics for the training dataset used to normalize the inputs. | | +| norm_with_clip_values | [integer] | If norm_type = "norm_with_clip" this array supplies a value that is less than the band maximum. The array must be the same length as "bands", each value is used to divide each band before clipping values between 0 and 1. | | pre_processing_function | string | A url to the preprocessing function where normalization and rescaling takes place, and any other significant operations. 
Or, instead, the function code path, for example: `my_python_module_name:my_processing_function` | | #### Parameters Object From b95a696a16cb000f8c45e1d014e4ba2934d71302 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Thu, 15 Feb 2024 11:10:58 -0800 Subject: [PATCH 033/112] remove superflous data type field, rely on data type in the array objects --- README.md | 33 ++++++++++++++++----------------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/README.md b/README.md index 19e2d14..b2acbc2 100644 --- a/README.md +++ b/README.md @@ -74,7 +74,6 @@ In addition, fields from the following extensions must be imported in the item: | name | string | **REQUIRED.** Informative name of the input variable. Example "RGB Time Series" | | | bands | [string] | **REQUIRED.** The names of the raster bands used to train or fine-tune the model, which may be all or a subset of bands available in a STAC Item's [Band Object](#bands-and-statistics). | | | input_array | [Array Object](#feature-array-object) | **REQUIRED.** The N-dimensional array object that describes the shape, dimension ordering, and data type. | | -| inference_data_type | string | **REQUIRED.** The required or suggested data type of the model input after all preprocessing has been applied and model inference is run. See the list of [common metadata data types](https://github.com/radiantearth/stac-spec/blob/f9b3c59ba810541c9da70c5f8d39635f8cba7bcd/item-spec/common-metadata.md#data-types). | | | parameters | [Parameters Object](#params-object) | Mapping with names for the parameters and their values. Some models may take additional scalars, tuples, and other non-tensor inputs like text. | | | norm_by_channel | boolean | Whether to normalize each channel by channel-wise statistics or to normalize by dataset statistics. If True, use an array of [Statistics Objects](#bands-and-statistics) that is ordered like the `bands` field in this object. | | | norm_type | string | Normalization method. 
Select one option from "min_max", "z_score", "max_norm", "mean_norm", "unit_variance", "norm_with_clip", "none" | | @@ -89,7 +88,7 @@ In addition, fields from the following extensions must be imported in the item: |---------------------------------------|------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | *parameter names depend on the model* | number `\|` string `\|` boolean `\|` array | The number of fields and their names depend on the model. Values should not be n-dimensional array inputs. If the model input can be represented as an n-dimensional array, it should instead be supplied as another [model input object](#model-input-object). | -The parameters field can either be specified in the [model input object](#model-input-object) if they are associated with a specific input or as an [Item or Collection](#item-properties-and-collection-fields) field if the parameters are supplied without relation to a specific model input. +The parameters field can either be specified in the [Model Input Object](#model-input-object) if they are associated with a specific input or as an [Item or Collection](#item-properties-and-collection-fields) field if the parameters are supplied without relation to a specific model input. 
#### Bands and Statistics @@ -101,11 +100,11 @@ A deviation from the [STAC 1.1 Bands Object](https://github.com/radiantearth/sta #### Array Object -| Field Name | Type | Description | -|------------|-----------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| shape | [integer] | **REQUIRED.** Shape of the input n-dimensional array ($N \times C \times H \times W$), including the batch size dimension. The batch size dimension must either be greater than 0 or -1 to indicate an unspecified batch dimension size. | -| dim_order | string | **REQUIRED.** How the above dimensions are ordered within the `shape`. "bhw", "bchw", "bthw", "btchw" are valid orderings where b=batch, c=channel, t=time, h=height, w=width. | -| dtype | string | **REQUIRED.** The data type of values in the n-dimensional array. Suggested to use [Numpy numerical types](https://numpy.org/devdocs/user/basics.types.html), omitting the numpy module, e.g. "float32" | +| Field Name | Type | Description | | +|------------|-----------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--| +| shape | [integer] | **REQUIRED.** Shape of the input n-dimensional array ($N \times C \times H \times W$), including the batch size dimension. The batch size dimension must either be greater than 0 or -1 to indicate an unspecified batch dimension size. | | +| dim_order | string | **REQUIRED.** How the above dimensions are ordered within the `shape`. 
"bhw", "bchw", "bthw", "btchw" are valid orderings where b=batch, c=channel, t=time, h=height, w=width. | | +| data_type | enum | **REQUIRED.** The data type of values in the n-dimensional array. For model inputs, this should be the data type of the processed input supplied to the model inference function, not the data type of the source bands. Use one of the [common metadata data types](https://github.com/radiantearth/stac-spec/blob/f9b3c59ba810541c9da70c5f8d39635f8cba7bcd/item-spec/common-metadata.md#data-types). | | Note: It is common in the machine learning, computer vision, and remote sensing communities to refer to rasters that are inputs to a model as arrays or tensors. Array Objects are distinct from the JSON array type used to represent lists of values. @@ -176,9 +175,8 @@ You can also use other base images. Pytorch and Tensorflow offer docker images f | Field Name | Type | Description | |--------------------------|-----------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | task | [Task Enum](#task-enum) | **REQUIRED.** Specifies the Machine Learning task for which the output can be used for. | -| number_of_classes | integer | Number of classes. | | result | [[Result Array Object](#result-array-object)] | The list of output array/tensor from the model. For example ($N \times H \times W$). Use -1 to indicate variable dimensions, like the batch dimension. | -| classification:classes | [Class Object](#class-object) | A list of class objects adhering to the Classification extension. | +| classification:classes | [[Class Object](#class-object)] | A list of class objects adhering to the [Classification extension](https://github.com/stac-extensions/classification). 
| | post_processing_function | string | A url to the postprocessing function where normalization, rescaling, and other operations take place.. Or, instead, the function code path, for example: `my_package.my_module.my_processing_function` | While only `task` is a required field, all fields are recommended for supervised tasks that produce a fixed shape tensor and have output classes. @@ -186,7 +184,7 @@ While only `task` is a required field, all fields are recommended for supervised #### Task Enum -It is recommended to define `task` with one of the following values: +It is recommended to define `task` with one of the following values for each Model Output Object: - `regression` - `classification` - `object detection` @@ -198,18 +196,19 @@ It is recommended to define `task` with one of the following values: - `image captioning` - `generative` -If the task falls within supervised machine learning and uses labels during training, this should align with the `label:tasks` values defined in [STAC Label Extension][stac-ext-label-props] for relevant -STAC Collections and Items employed with the model described by this extension. +If the task falls within the category of supervised machine learning and uses labels during training, this should align with the `label:tasks` values defined in [STAC Label Extension][stac-ext-label-props] for relevant +STAC Collections and Items published with the model described by this extension. [stac-ext-label-props]: https://github.com/stac-extensions/label#item-properties #### Result Array Object -| Field Name | Type | Description | -|------------|-----------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| shape | [integer] | **REQUIRED.** Shape of the n-dimensional result array ($N \times H \times W$), possibly including a batch size dimension. 
The batch size dimension must either be greater than 0 or -1 to indicate an unspecified batch dimension size. | -| dim_names | [string] | **REQUIRED.** The names of the above dimensions of the result array, ordered the same as this object's `shape` field. | -| dtype | string | **REQUIRED.** The data type of values in the array. Suggested to use [Numpy numerical types](https://numpy.org/devdocs/user/basics.types.html), omitting the numpy module, e.g. "float32" | +| Field Name | Type | Description | | +|------------|-----------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--| +| shape | [integer] | **REQUIRED.** Shape of the n-dimensional result array ($N \times H \times W$), possibly including a batch size dimension. The batch size dimension must either be greater than 0 or -1 to indicate an unspecified batch dimension size. | | +| dim_names | [string] | **REQUIRED.** The names of the above dimensions of the result array, ordered the same as this object's `shape` field. | | +| data_type | enum | **REQUIRED.** The data type of values in the n-dimensional array. For model outputs, this should be the data type of the result of the model inference without extra post processing. Use one of the [common metadata data types](https://github.com/radiantearth/stac-spec/blob/f9b3c59ba810541c9da70c5f8d39635f8cba7bcd/item-spec/common-metadata.md#data-types). 
| | + #### Class Object From af98031a1f13f74899799ecc41ce7926fa2c8e67 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Thu, 15 Feb 2024 11:43:25 -0800 Subject: [PATCH 034/112] update stac_model and example --- stac_model/example.json | 159 ++++++++++++++++++++++++++++++ stac_model/model_metadata.py | 2 - stac_model/stac_model/__main__.py | 19 ++-- stac_model/stac_model/input.py | 7 +- stac_model/stac_model/output.py | 25 +++-- stac_model/stac_model/paths.py | 1 + stac_model/stac_model/runtime.py | 2 - stac_model/stac_model/schema.py | 27 +++-- 8 files changed, 198 insertions(+), 44 deletions(-) create mode 100644 stac_model/example.json diff --git a/stac_model/example.json b/stac_model/example.json new file mode 100644 index 0000000..2a71978 --- /dev/null +++ b/stac_model/example.json @@ -0,0 +1,159 @@ +{ + "mlm_name": "Resnet-18 Sentinel-2 ALL MOCO", + "mlm_task": "classification", + "mlm_framework": "pytorch", + "mlm_framework_version": "2.1.2+cu121", + "mlm_file_size": 1, + "mlm_memory_size": 1, + "mlm_input": [ + { + "name": "13 Band Sentinel-2 Batch", + "bands": [ + "B01", + "B02", + "B03", + "B04", + "B05", + "B06", + "B07", + "B08", + "B8A", + "B09", + "B10", + "B11", + "B12" + ], + "input_array": { + "shape": [ + -1, + 13, + 64, + 64 + ], + "dim_order": "bchw", + "data_type": "float32" + }, + "norm_by_channel": true, + "norm_type": "z_score", + "statistics": { + "mean": [ + 1354.40546513, + 1118.24399958, + 1042.92983953, + 947.62620298, + 1199.47283961, + 1999.79090914, + 2369.22292565, + 2296.82608323, + 732.08340178, + 12.11327804, + 1819.01027855, + 1118.92391149, + 2594.14080798 + ], + "stddev": [ + 245.71762908, + 333.00778264, + 395.09249139, + 593.75055589, + 566.4170017, + 861.18399006, + 1086.63139075, + 1117.98170791, + 404.91978886, + 4.77584468, + 1002.58768311, + 761.30323499, + 1231.58581042 + ] + }, + "pre_processing_function": 
"https://github.com/microsoft/torchgeo/blob/545abe8326efc2848feae69d0212a15faba3eb00/torchgeo/datamodules/eurosat.py" + } + ], + "mlm_output": [ + { + "task": "classification", + "result_array": [ + { + "shape": [ + -1, + 10 + ], + "dim_names": [ + "batch", + "class" + ], + "data_type": "float32" + } + ], + "classification_classes": [ + { + "value": 0, + "name": "Annual Crop", + "nodata": false + }, + { + "value": 1, + "name": "Forest", + "nodata": false + }, + { + "value": 2, + "name": "Herbaceous Vegetation", + "nodata": false + }, + { + "value": 3, + "name": "Highway", + "nodata": false + }, + { + "value": 4, + "name": "Industrial Buildings", + "nodata": false + }, + { + "value": 5, + "name": "Pasture", + "nodata": false + }, + { + "value": 6, + "name": "Permanent Crop", + "nodata": false + }, + { + "value": 7, + "name": "Residential Buildings", + "nodata": false + }, + { + "value": 8, + "name": "River", + "nodata": false + }, + { + "value": 9, + "name": "SeaLake", + "nodata": false + } + ] + } + ], + "mlm_runtime": [ + { + "asset": { + "href": "." + }, + "source_code": { + "href": "." 
+ }, + "accelerator": "cuda", + "accelerator_constrained": false, + "hardware_summary": "Unknown" + } + ], + "mlm_total_parameters": 11700000, + "mlm_pretrained_source": "EuroSat Sentinel-2", + "mlm_summary": "Sourced from torchgeo python library, identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO" +} diff --git a/stac_model/model_metadata.py b/stac_model/model_metadata.py index 447f441..fa87a81 100644 --- a/stac_model/model_metadata.py +++ b/stac_model/model_metadata.py @@ -1,8 +1,6 @@ from pydantic import BaseModel, Field, FilePath, AnyUrl from typing import Optional, List, Tuple, Dict, Literal, Any from uuid import uuid4 -import numpy as np -import re # Pydantic Models class TensorSignature(BaseModel): diff --git a/stac_model/stac_model/__main__.py b/stac_model/stac_model/__main__.py index e5e312d..6692014 100644 --- a/stac_model/stac_model/__main__.py +++ b/stac_model/stac_model/__main__.py @@ -2,12 +2,12 @@ from rich.console import Console from stac_model import __version__ -from stac_model.schema import InputArray, Statistics, ModelInput, Architecture, Runtime, Asset, ResultArray, ModelOutput, MLModel +from stac_model.schema import InputArray, Statistics, ModelInput, Runtime, Asset, ResultArray, ModelOutput, ClassObject, MLModel app = typer.Typer( name="stac-model", - help="A PydanticV2 validation and serialization libary for the STAC ML Model Extension", + help="A PydanticV2 validation and serialization library for the STAC Machine Learning Model Extension", add_completion=False, ) console = Console() @@ -34,7 +34,7 @@ def main( """Generate example spec.""" input_array = InputArray( - shape=[-1, 13, 64, 64], dim_order="bchw", dtype="float32" + shape=[-1, 13, 64, 64], dim_order="bchw", data_type="float32" ) band_names = ["B01", "B02", "B03", "B04", "B05", "B06", "B07", "B08", "B8A", "B09", "B10", "B11", "B12"] stats = Statistics(mean=[1354.40546513, 1118.24399958, 1042.92983953, 947.62620298, 1199.47283961, @@ -44,12 +44,10 @@ def main( 861.18399006, 
1086.63139075, 1117.98170791, 404.91978886, 4.77584468, 1002.58768311, 761.30323499, 1231.58581042]) mlm_input = ModelInput(name= "13 Band Sentinel-2 Batch", bands=band_names, input_array=input_array, norm_by_channel=True, norm_type="z_score", rescale_type="none", statistics=stats, pre_processing_function = "https://github.com/microsoft/torchgeo/blob/545abe8326efc2848feae69d0212a15faba3eb00/torchgeo/datamodules/eurosat.py") - mlm_architecture = Architecture(name = "ResNet-18", file_size=1, memory_size=1, summary= "Sourced from torchgeo python library, identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO", pretrained_source="EuroSat Sentinel-2", total_parameters= 11_700_000) mlm_runtime = Runtime(framework= "torch", version= "2.1.2+cu121", asset= Asset(href= "https://huggingface.co/torchgeo/resnet18_sentinel2_all_moco/resolve/main/resnet18_sentinel2_all_moco-59bfdff9.pth"), source_code= Asset(href="https://github.com/microsoft/torchgeo/blob/61efd2e2c4df7ebe3bd03002ebbaeaa3cfe9885a/torchgeo/models/resnet.py#L207"), accelerator="cuda", accelerator_constrained=False, hardware_summary="Unknown") - result_array = ResultArray(shape=[-1, 10], dim_names=["batch", "class"], dtype="float32") - mlm_output = ModelOutput(task= "classification", number_of_classes= 10, output_shape=[-1, 10], result_array=result_array, class_name_mapping= { - "Annual Crop": 0, + result_array = ResultArray(shape=[-1, 10], dim_names=["batch", "class"], data_type="float32") + class_map = {"Annual Crop": 0, "Forest": 1, "Herbaceous Vegetation": 2, "Highway": 3, @@ -58,9 +56,10 @@ def main( "Permanent Crop": 6, "Residential Buildings": 7, "River": 8, - "SeaLake": 9, - }) - ml_model_meta = MLModel(mlm_name="Resnet-18 Sentinel-2 ALL MOCO", mlm_input=[mlm_input], mlm_architecture=mlm_architecture, mlm_runtime=mlm_runtime, mlm_output=mlm_output) + "SeaLake": 9} + class_objects = [ClassObject(value=class_map[class_name], name=class_name) for class_name in class_map] + mlm_output = ModelOutput(task= 
"classification", classification_classes=class_objects, output_shape=[-1, 10], result_array=[result_array]) + ml_model_meta = MLModel(mlm_name="Resnet-18 Sentinel-2 ALL MOCO", mlm_task="classification", mlm_framework = 'pytorch', mlm_framework_version="2.1.2+cu121", mlm_file_size=1, mlm_memory_size=1, mlm_summary= "Sourced from torchgeo python library, identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO", mlm_pretrained_source="EuroSat Sentinel-2", mlm_total_parameters= 11_700_000, mlm_input=[mlm_input], mlm_runtime=[mlm_runtime], mlm_output=[mlm_output]) json_str = ml_model_meta.model_dump_json(indent=2, exclude_none=True) with open("example.json", "w") as file: file.write(json_str) diff --git a/stac_model/stac_model/input.py b/stac_model/stac_model/input.py index 2d477a9..2f72b7e 100644 --- a/stac_model/stac_model/input.py +++ b/stac_model/stac_model/input.py @@ -7,7 +7,7 @@ class InputArray(BaseModel): shape: List[Union[int,float]] dim_order: Literal["bhw", "bchw", "bthw", "btchw"] - dtype: str = Field(..., pattern="^(uint8|uint16|int16|int32|float16|float32|float64)$") + data_type: str = Field(..., pattern="^(uint8|uint16|uint32|uint64|int8|int16|int32|int64|float16|float32|float64)$") class Statistics(BaseModel): minimum: Optional[List[Union[float, int]]] = None @@ -30,7 +30,8 @@ class ModelInput(BaseModel): input_array: InputArray parameters: Dict[str, Union[int, str, bool, List[Union[int, str, bool]]]] = None norm_by_channel: bool = None - norm_type: Literal["min_max", "z_score", "max_norm", "mean_norm", "unit_variance", "none"] = None - rescale_type: Literal["crop", "pad", "interpolation", "none"] = None + norm_type: Literal["min_max", "z_score", "max_norm", "mean_norm", "unit_variance", "norm_with_clip", "none"] = None + resize_type: Literal["crop", "pad", "interpolation", "none"] = None statistics: Optional[Union[Statistics, List[Statistics]]] = None + norm_with_clip_values: Optional[List[Union[float, int]]] = None pre_processing_function: Optional[str | 
AnyUrl] = None diff --git a/stac_model/stac_model/output.py b/stac_model/stac_model/output.py index 160dd13..9d099dc 100644 --- a/stac_model/stac_model/output.py +++ b/stac_model/stac_model/output.py @@ -1,5 +1,5 @@ from pydantic import BaseModel, Field -from typing import List, Dict, Union, Optional +from typing import List, Union, Optional from enum import Enum class TaskEnum(str, Enum): regression = "regression" @@ -12,23 +12,22 @@ class TaskEnum(str, Enum): similarity_search = "similarity search" image_captioning = "image captioning" generative = "generative" - -class ClassMap(BaseModel): - class_to_label_id: Dict[str, int] - # Property to reverse the mapping - @property - def label_id_to_class(self) -> Dict[int, str]: - # Reverse the mapping - return {v: k for k, v in self.class_to_label_id.items()} + super_resolution = "super resolution" class ResultArray(BaseModel): shape: List[Union[int,float]] dim_names: List[str] - dtype: str = Field(..., pattern="^(uint8|uint16|int16|int32|float16|float32|float64)$") + data_type: str = Field(..., pattern="^(uint8|uint16|uint32|uint64|int8|int16|int32|int64|float16|float32|float64)$") +class ClassObject(BaseModel): + value: int + name: str + description: str = None + title: str = None + color_hint: str = None + nodata: bool = False class ModelOutput(BaseModel): task: TaskEnum - number_of_classes: int = None - result_array: ResultArray = None - class_name_mapping: Optional[Dict[str, int]] = None + result_array: List[ResultArray] = None + classification_classes: List[ClassObject] = None post_processing_function: Optional[str] = None diff --git a/stac_model/stac_model/paths.py b/stac_model/stac_model/paths.py index 6786536..95b5bc3 100644 --- a/stac_model/stac_model/paths.py +++ b/stac_model/stac_model/paths.py @@ -2,6 +2,7 @@ field_validator, AnyUrl ) +import re class S3Path(AnyUrl): allowed_schemes = {"s3"} user_required = False diff --git a/stac_model/stac_model/runtime.py b/stac_model/stac_model/runtime.py index 
41008ca..20043ad 100644 --- a/stac_model/stac_model/runtime.py +++ b/stac_model/stac_model/runtime.py @@ -52,8 +52,6 @@ def __str__(self): return self.value class Runtime(BaseModel): - framework: str - version: str asset: Asset source_code: Asset accelerator: AcceleratorEnum diff --git a/stac_model/stac_model/schema.py b/stac_model/stac_model/schema.py index d811509..2b053aa 100644 --- a/stac_model/stac_model/schema.py +++ b/stac_model/stac_model/schema.py @@ -1,23 +1,22 @@ from pydantic import BaseModel from .input import ModelInput, InputArray, Band, Statistics -from .output import ModelOutput, ClassMap, ResultArray +from .output import ModelOutput, ClassObject, ResultArray, TaskEnum from .runtime import Runtime, Asset, Container -from typing import List, Optional, Dict, Union - -class Architecture(BaseModel): - name: str - file_size: int - memory_size: int - summary: str = None - pretrained_source: str = None - total_parameters: Optional[int] = None +from typing import List, Dict, Union class MLModel(BaseModel): mlm_name: str + mlm_task: TaskEnum + mlm_framework: str + mlm_framework_version: str + mlm_file_size: int + mlm_memory_size: int mlm_input: List[ModelInput] - mlm_architecture: Architecture - mlm_runtime: Runtime - mlm_output: ModelOutput + mlm_output: List[ModelOutput] + mlm_runtime: List[Runtime] + mlm_total_parameters: int + mlm_pretrained_source: str + mlm_summary: str mlm_parameters: Dict[str, Union[int, str, bool, List[Union[int, str, bool]]]] = None -__all__ = ["MLModel", "ModelInput", "InputArray", "Band", "Statistics", "ModelOutput", "Asset", "ClassMap", "ResultArray", "Runtime", "Container", "Asset", "Architecture"] +__all__ = ["MLModel", "ModelInput", "InputArray", "Band", "Statistics", "ModelOutput", "ClassObject", "Asset", "ResultArray", "Runtime", "Container", "Asset"] From 1aba5fc5400e415fcf927ea7ae2415c4df5c2918 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Thu, 15 Feb 2024 11:45:18 -0800 Subject: [PATCH 035/112] rescale -> resize add 
super res task --- stac_model/pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stac_model/pyproject.toml b/stac_model/pyproject.toml index de69001..59b02bc 100644 --- a/stac_model/pyproject.toml +++ b/stac_model/pyproject.toml @@ -7,7 +7,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "stac-model" -version = "0.1.1.alpha1" +version = "0.1.1.alpha2" description = "A PydanticV2 validation and serialization libary for the STAC ML Model Extension" readme = "README.md" authors = ["Ryan Avery "] From f86a64b4e67819d4ee76e788d9a15232cecd37db Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Thu, 15 Feb 2024 12:04:32 -0800 Subject: [PATCH 036/112] update example --- stac_model/README.md | 245 +++++++++++++++++++++++++++---------------- 1 file changed, 155 insertions(+), 90 deletions(-) diff --git a/stac_model/README.md b/stac_model/README.md index c746941..4e59a86 100644 --- a/stac_model/README.md +++ b/stac_model/README.md @@ -36,109 +36,174 @@ Then you can run stac-model --help ``` -or with `Poetry`: - -```bash -poetry run stac-model --help -``` - ## Creating an example metadata json ``` -poetry run stac-model +stac-model ``` This will make an example example.json metadata file for an example model. 
Currently this looks like -``` -{ - "mlm_input": { - "name": "13 Band Sentinel-2 Batch", - "bands": [], - "input_array": { - "shape": [ - -1, - 13, - 64, - 64 +```json + "mlm_name": "Resnet-18 Sentinel-2 ALL MOCO", + "mlm_task": "classification", + "mlm_framework": "pytorch", + "mlm_framework_version": "2.1.2+cu121", + "mlm_file_size": 1, + "mlm_memory_size": 1, + "mlm_input": [ + { + "name": "13 Band Sentinel-2 Batch", + "bands": [ + "B01", + "B02", + "B03", + "B04", + "B05", + "B06", + "B07", + "B08", + "B8A", + "B09", + "B10", + "B11", + "B12" ], - "dim_order": "bchw", - "dtype": "float32" - }, - "norm_type": "z_score", - "rescale_type": "none", - "norm_by_channel": true, - "statistics": { - "mean": [ - 1354.40546513, - 1118.24399958, - 1042.92983953, - 947.62620298, - 1199.47283961, - 1999.79090914, - 2369.22292565, - 2296.82608323, - 732.08340178, - 12.11327804, - 1819.01027855, - 1118.92391149, - 2594.14080798 + "input_array": { + "shape": [ + -1, + 13, + 64, + 64 + ], + "dim_order": "bchw", + "data_type": "float32" + }, + "norm_by_channel": true, + "norm_type": "z_score", + "statistics": { + "mean": [ + 1354.40546513, + 1118.24399958, + 1042.92983953, + 947.62620298, + 1199.47283961, + 1999.79090914, + 2369.22292565, + 2296.82608323, + 732.08340178, + 12.11327804, + 1819.01027855, + 1118.92391149, + 2594.14080798 + ], + "stddev": [ + 245.71762908, + 333.00778264, + 395.09249139, + 593.75055589, + 566.4170017, + 861.18399006, + 1086.63139075, + 1117.98170791, + 404.91978886, + 4.77584468, + 1002.58768311, + 761.30323499, + 1231.58581042 + ] + }, + "pre_processing_function": "https://github.com/microsoft/torchgeo/blob/545abe8326efc2848feae69d0212a15faba3eb00/torchgeo/datamodules/eurosat.py" + } + ], + "mlm_output": [ + { + "task": "classification", + "result_array": [ + { + "shape": [ + -1, + 10 + ], + "dim_names": [ + "batch", + "class" + ], + "data_type": "float32" + } ], - "stddev": [ - 245.71762908, - 333.00778264, - 395.09249139, - 593.75055589, - 
566.4170017, - 861.18399006, - 1086.63139075, - 1117.98170791, - 404.91978886, - 4.77584468, - 1002.58768311, - 761.30323499, - 1231.58581042 + "classification_classes": [ + { + "value": 0, + "name": "Annual Crop", + "nodata": false + }, + { + "value": 1, + "name": "Forest", + "nodata": false + }, + { + "value": 2, + "name": "Herbaceous Vegetation", + "nodata": false + }, + { + "value": 3, + "name": "Highway", + "nodata": false + }, + { + "value": 4, + "name": "Industrial Buildings", + "nodata": false + }, + { + "value": 5, + "name": "Pasture", + "nodata": false + }, + { + "value": 6, + "name": "Permanent Crop", + "nodata": false + }, + { + "value": 7, + "name": "Residential Buildings", + "nodata": false + }, + { + "value": 8, + "name": "River", + "nodata": false + }, + { + "value": 9, + "name": "SeaLake", + "nodata": false + } ] - }, - "pre_processing_function": "https://github.com/microsoft/torchgeo/blob/545abe8326efc2848feae69d0212a15faba3eb00/torchgeo/datamodules/eurosat.py" - }, - "mlm_architecture": { - "name": "ResNet-18", - "summary": "Sourced from torchgeo python library, identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO", - "pretrained": true, - "total_parameters": 11700000 - }, - "mlm_runtime": { - "framework": "torch", - "version": "2.1.2+cu121", - "asset": { - "href": "." - }, - "source_code_url": "https://github.com/huggingface/pytorch-image-models/blob/b5a4fa9c3be6ac732807db7e87d176f5e4fc06f1/timm/models/resnet.py#L362", - "handler": "torchgeo.models.resnet.ResNet18" - }, - "mlm_output": { - "task": "classification", - "number_of_classes": 10, - "output_shape": [ - -1, - 10 - ], - "class_name_mapping": { - "Annual Crop": 0, - "Forest": 1, - "Herbaceous Vegetation": 2, - "Highway": 3, - "Industrial Buildings": 4, - "Pasture": 5, - "Permanent Crop": 6, - "Residential Buildings": 7, - "River": 8, - "SeaLake": 9 } - } + ], + "mlm_runtime": [ + { + "asset": { + "href": "." + }, + "source_code": { + "href": "." 
+ }, + "accelerator": "cuda", + "accelerator_constrained": false, + "hardware_summary": "Unknown" + } + ], + "mlm_total_parameters": 11700000, + "mlm_pretrained_source": "EuroSat Sentinel-2", + "mlm_summary": "Sourced from torchgeo python library, identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO" } ``` From 76c0ba91b9cc12fdfec814865a6ff8242160ab39 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Fri, 23 Feb 2024 11:35:28 -0800 Subject: [PATCH 037/112] best practices for processing ext, format and lint --- README.md | 5 +- stac_model/model_metadata.py | 67 +++- stac_model/poetry.lock | 545 +++++++++++++----------------- stac_model/pyproject.toml | 17 +- stac_model/stac_model/__main__.py | 157 +++++++-- stac_model/stac_model/input.py | 31 +- stac_model/stac_model/output.py | 22 +- stac_model/stac_model/paths.py | 10 +- stac_model/stac_model/runtime.py | 23 +- stac_model/stac_model/schema.py | 27 +- stac_model/tests/test_schema.py | 14 +- 11 files changed, 523 insertions(+), 395 deletions(-) diff --git a/README.md b/README.md index b2acbc2..11cf2c3 100644 --- a/README.md +++ b/README.md @@ -22,7 +22,7 @@ The main objectives of the extension are: Specifically, this extension records the following information to make ML models searchable and reusable: 1. Sensor band specifications -1. Model input transforms including rescale and normalization +1. Model input transforms including resize and normalization 1. Model output shape, data type, and its semantic interpretation 1. An optional, flexible description of the runtime environment to be able to run the model 1. Scientific references @@ -77,7 +77,7 @@ In addition, fields from the following extensions must be imported in the item: | parameters | [Parameters Object](#params-object) | Mapping with names for the parameters and their values. Some models may take additional scalars, tuples, and other non-tensor inputs like text. 
| | | norm_by_channel | boolean | Whether to normalize each channel by channel-wise statistics or to normalize by dataset statistics. If True, use an array of [Statistics Objects](#bands-and-statistics) that is ordered like the `bands` field in this object. | | | norm_type | string | Normalization method. Select one option from "min_max", "z_score", "max_norm", "mean_norm", "unit_variance", "norm_with_clip", "none" | | -| rescale_type | string | High-level descriptor of the rescaling method to change image shape. Select one option from "crop", "pad", "interpolation", "none". If your rescaling method combines more than one of these operations, provide the name of the operation instead | | +| resize_type | string | High-level descriptor of the rescaling method to change image shape. Select one option from "crop", "pad", "interpolation", "none". If your rescaling method combines more than one of these operations, provide the name of the operation instead | | | statistics | [Statistics Object](stac-statistics) `\|` [[Statistics Object](stac-statistics)] | Dataset statistics for the training dataset used to normalize the inputs. | | | norm_with_clip_values | [integer] | If norm_type = "norm_with_clip" this array supplies a value that is less than the band maximum. The array must be the same length as "bands", each value is used to divide each band before clipping values between 0 and 1. | | pre_processing_function | string | A url to the preprocessing function where normalization and rescaling takes place, and any other significant operations. 
Or, instead, the function code path, for example: `my_python_module_name:my_processing_function` | | @@ -195,6 +195,7 @@ It is recommended to define `task` with one of the following values for each Mod - `similarity search` - `image captioning` - `generative` +- `super resolution` If the task falls within the category of supervised machine learning and uses labels during training, this should align with the `label:tasks` values defined in [STAC Label Extension][stac-ext-label-props] for relevant STAC Collections and Items published with the model described by this extension. diff --git a/stac_model/model_metadata.py b/stac_model/model_metadata.py index fa87a81..9e0c502 100644 --- a/stac_model/model_metadata.py +++ b/stac_model/model_metadata.py @@ -1,13 +1,16 @@ -from pydantic import BaseModel, Field, FilePath, AnyUrl -from typing import Optional, List, Tuple, Dict, Literal, Any +from typing import Any, Dict, List, Literal, Optional, Tuple from uuid import uuid4 +from pydantic import AnyUrl, BaseModel, Field, FilePath + + # Pydantic Models class TensorSignature(BaseModel): name: Optional[str] = None dtype: Any = Field(...) shape: Tuple[int, ...] | List[int] = Field(...) 
+ class ModelSignature(BaseModel): inputs: List[TensorSignature] outputs: List[TensorSignature] @@ -16,32 +19,35 @@ class ModelSignature(BaseModel): class Config: arbitrary_types_allowed = True + class RuntimeConfig(BaseModel): environment: str + class S3Path(AnyUrl): - allowed_schemes = {'s3'} + allowed_schemes = {"s3"} user_required = False max_length = 1023 min_length = 8 @classmethod def validate_s3_url(cls, v): - if not v.startswith('s3://'): - raise ValueError('S3 path must start with s3://') + if not v.startswith("s3://"): + raise ValueError("S3 path must start with s3://") return v @classmethod def validate_bucket_name(cls, v): if not v: - raise ValueError('Bucket name cannot be empty') + raise ValueError("Bucket name cannot be empty") return v @classmethod def validate_key(cls, v): - if '//' in v: - raise ValueError('Key must not contain double slashes') - return v.strip('/') + if "//" in v: + raise ValueError("Key must not contain double slashes") + return v.strip("/") + class ModelArtifact(BaseModel): path: S3Path | FilePath | str = Field(...) 
@@ -50,6 +56,7 @@ class ModelArtifact(BaseModel): class Config: arbitrary_types_allowed = True + class ClassMap(BaseModel): class_to_label_id: Dict[str, int] @@ -57,6 +64,7 @@ class ClassMap(BaseModel): def label_id_to_class(self) -> Dict[int, str]: return {v: k for k, v in self.class_to_label_id.items()} + class ModelMetadata(BaseModel): signatures: ModelSignature artifact: ModelArtifact @@ -67,31 +75,54 @@ class ModelMetadata(BaseModel): ml_model_type: Optional[str] = None ml_model_processor_type: Optional[Literal["cpu", "gpu", "tpu", "mps"]] = None ml_model_learning_approach: Optional[str] = None - ml_model_prediction_type: Optional[Literal["object-detection", "classification", "segmentation", "regression"]] = None + ml_model_prediction_type: Optional[ + Literal["object-detection", "classification", "segmentation", "regression"] + ] = None ml_model_architecture: Optional[str] = None class Config: arbitrary_types_allowed = True + # Functions to create, serialize, and deserialize ModelMetadata def create_metadata(): - input_sig = TensorSignature(name='input_tensor', dtype='float32', shape=(-1, 13, 64, 64)) - output_sig = TensorSignature(name='output_tensor', dtype='float32', shape=(-1, 10)) + input_sig = TensorSignature( + name="input_tensor", dtype="float32", shape=(-1, 13, 64, 64) + ) + output_sig = TensorSignature(name="output_tensor", dtype="float32", shape=(-1, 10)) model_sig = ModelSignature(inputs=[input_sig], outputs=[output_sig]) model_artifact = ModelArtifact(path="s3://example/s3/uri/model.pt") - class_map = ClassMap(class_to_label_id={ - 'Annual Crop': 0, 'Forest': 1, 'Herbaceous Vegetation': 2, 'Highway': 3, - 'Industrial Buildings': 4, 'Pasture': 5, 'Permanent Crop': 6, - 'Residential Buildings': 7, 'River': 8, 'SeaLake': 9 - }) - return ModelMetadata(name="eurosat", class_map=class_map, signatures=model_sig, artifact=model_artifact, ml_model_processor_type="cpu") + class_map = ClassMap( + class_to_label_id={ + "Annual Crop": 0, + "Forest": 1, + 
"Herbaceous Vegetation": 2, + "Highway": 3, + "Industrial Buildings": 4, + "Pasture": 5, + "Permanent Crop": 6, + "Residential Buildings": 7, + "River": 8, + "SeaLake": 9, + } + ) + return ModelMetadata( + name="eurosat", + class_map=class_map, + signatures=model_sig, + artifact=model_artifact, + ml_model_processor_type="cpu", + ) + def metadata_json(metadata: ModelMetadata) -> str: return metadata.model_dump_json(indent=2) + def model_metadata_json_operations(json_str: str) -> ModelMetadata: return ModelMetadata.model_validate_json(json_str) + # Running the functions end-to-end metadata = create_metadata() json_str = metadata_json(metadata) diff --git a/stac_model/poetry.lock b/stac_model/poetry.lock index af058e3..15f9d0f 100644 --- a/stac_model/poetry.lock +++ b/stac_model/poetry.lock @@ -13,36 +13,36 @@ files = [ [[package]] name = "bandit" -version = "1.7.6" +version = "1.7.7" description = "Security oriented static analyser for python code." optional = false python-versions = ">=3.8" files = [ - {file = "bandit-1.7.6-py3-none-any.whl", hash = "sha256:36da17c67fc87579a5d20c323c8d0b1643a890a2b93f00b3d1229966624694ff"}, - {file = "bandit-1.7.6.tar.gz", hash = "sha256:72ce7bc9741374d96fb2f1c9a8960829885f1243ffde743de70a19cee353e8f3"}, + {file = "bandit-1.7.7-py3-none-any.whl", hash = "sha256:17e60786a7ea3c9ec84569fd5aee09936d116cb0cb43151023258340dbffb7ed"}, + {file = "bandit-1.7.7.tar.gz", hash = "sha256:527906bec6088cb499aae31bc962864b4e77569e9d529ee51df3a93b4b8ab28a"}, ] [package.dependencies] colorama = {version = ">=0.3.9", markers = "platform_system == \"Windows\""} -GitPython = ">=3.1.30" PyYAML = ">=5.3.1" rich = "*" stevedore = ">=1.20.0" [package.extras] -test = ["beautifulsoup4 (>=4.8.0)", "coverage (>=4.5.4)", "fixtures (>=3.0.0)", "flake8 (>=4.0.0)", "pylint (==1.9.4)", "stestr (>=2.5.0)", "testscenarios (>=0.5.0)", "testtools (>=2.3.0)", "tomli (>=1.1.0)"] +baseline = ["GitPython (>=3.1.30)"] +test = ["beautifulsoup4 (>=4.8.0)", "coverage 
(>=4.5.4)", "fixtures (>=3.0.0)", "flake8 (>=4.0.0)", "pylint (==1.9.4)", "stestr (>=2.5.0)", "testscenarios (>=0.5.0)", "testtools (>=2.3.0)"] toml = ["tomli (>=1.1.0)"] yaml = ["PyYAML"] [[package]] name = "certifi" -version = "2023.11.17" +version = "2024.2.2" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" files = [ - {file = "certifi-2023.11.17-py3-none-any.whl", hash = "sha256:e036ab49d5b79556f99cfc2d9320b34cfbe5be05c5871b51de9329f0603b0474"}, - {file = "certifi-2023.11.17.tar.gz", hash = "sha256:9b469f3a900bf28dc19b8cfbf8019bf47f7fdd1a65a1d4ffb98fc14166beb4d1"}, + {file = "certifi-2024.2.2-py3-none-any.whl", hash = "sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1"}, + {file = "certifi-2024.2.2.tar.gz", hash = "sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f"}, ] [[package]] @@ -182,63 +182,63 @@ files = [ [[package]] name = "coverage" -version = "7.4.0" +version = "7.4.2" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.8" files = [ - {file = "coverage-7.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:36b0ea8ab20d6a7564e89cb6135920bc9188fb5f1f7152e94e8300b7b189441a"}, - {file = "coverage-7.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0676cd0ba581e514b7f726495ea75aba3eb20899d824636c6f59b0ed2f88c471"}, - {file = "coverage-7.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0ca5c71a5a1765a0f8f88022c52b6b8be740e512980362f7fdbb03725a0d6b9"}, - {file = "coverage-7.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7c97726520f784239f6c62506bc70e48d01ae71e9da128259d61ca5e9788516"}, - {file = "coverage-7.4.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:815ac2d0f3398a14286dc2cea223a6f338109f9ecf39a71160cd1628786bc6f5"}, - {file = 
"coverage-7.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:80b5ee39b7f0131ebec7968baa9b2309eddb35b8403d1869e08f024efd883566"}, - {file = "coverage-7.4.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5b2ccb7548a0b65974860a78c9ffe1173cfb5877460e5a229238d985565574ae"}, - {file = "coverage-7.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:995ea5c48c4ebfd898eacb098164b3cc826ba273b3049e4a889658548e321b43"}, - {file = "coverage-7.4.0-cp310-cp310-win32.whl", hash = "sha256:79287fd95585ed36e83182794a57a46aeae0b64ca53929d1176db56aacc83451"}, - {file = "coverage-7.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:5b14b4f8760006bfdb6e08667af7bc2d8d9bfdb648351915315ea17645347137"}, - {file = "coverage-7.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:04387a4a6ecb330c1878907ce0dc04078ea72a869263e53c72a1ba5bbdf380ca"}, - {file = "coverage-7.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ea81d8f9691bb53f4fb4db603203029643caffc82bf998ab5b59ca05560f4c06"}, - {file = "coverage-7.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:74775198b702868ec2d058cb92720a3c5a9177296f75bd97317c787daf711505"}, - {file = "coverage-7.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:76f03940f9973bfaee8cfba70ac991825611b9aac047e5c80d499a44079ec0bc"}, - {file = "coverage-7.4.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:485e9f897cf4856a65a57c7f6ea3dc0d4e6c076c87311d4bc003f82cfe199d25"}, - {file = "coverage-7.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6ae8c9d301207e6856865867d762a4b6fd379c714fcc0607a84b92ee63feff70"}, - {file = "coverage-7.4.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:bf477c355274a72435ceb140dc42de0dc1e1e0bf6e97195be30487d8eaaf1a09"}, - {file = "coverage-7.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:83c2dda2666fe32332f8e87481eed056c8b4d163fe18ecc690b02802d36a4d26"}, - {file = "coverage-7.4.0-cp311-cp311-win32.whl", hash = "sha256:697d1317e5290a313ef0d369650cfee1a114abb6021fa239ca12b4849ebbd614"}, - {file = "coverage-7.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:26776ff6c711d9d835557ee453082025d871e30b3fd6c27fcef14733f67f0590"}, - {file = "coverage-7.4.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:13eaf476ec3e883fe3e5fe3707caeb88268a06284484a3daf8250259ef1ba143"}, - {file = "coverage-7.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846f52f46e212affb5bcf131c952fb4075b55aae6b61adc9856222df89cbe3e2"}, - {file = "coverage-7.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26f66da8695719ccf90e794ed567a1549bb2644a706b41e9f6eae6816b398c4a"}, - {file = "coverage-7.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:164fdcc3246c69a6526a59b744b62e303039a81e42cfbbdc171c91a8cc2f9446"}, - {file = "coverage-7.4.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:316543f71025a6565677d84bc4df2114e9b6a615aa39fb165d697dba06a54af9"}, - {file = "coverage-7.4.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bb1de682da0b824411e00a0d4da5a784ec6496b6850fdf8c865c1d68c0e318dd"}, - {file = "coverage-7.4.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:0e8d06778e8fbffccfe96331a3946237f87b1e1d359d7fbe8b06b96c95a5407a"}, - {file = "coverage-7.4.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a56de34db7b7ff77056a37aedded01b2b98b508227d2d0979d373a9b5d353daa"}, - {file = "coverage-7.4.0-cp312-cp312-win32.whl", hash = "sha256:51456e6fa099a8d9d91497202d9563a320513fcf59f33991b0661a4a6f2ad450"}, - {file = "coverage-7.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:cd3c1e4cb2ff0083758f09be0f77402e1bdf704adb7f89108007300a6da587d0"}, - {file = "coverage-7.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:e9d1bf53c4c8de58d22e0e956a79a5b37f754ed1ffdbf1a260d9dcfa2d8a325e"}, - {file = "coverage-7.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:109f5985182b6b81fe33323ab4707011875198c41964f014579cf82cebf2bb85"}, - {file = "coverage-7.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cc9d4bc55de8003663ec94c2f215d12d42ceea128da8f0f4036235a119c88ac"}, - {file = "coverage-7.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cc6d65b21c219ec2072c1293c505cf36e4e913a3f936d80028993dd73c7906b1"}, - {file = "coverage-7.4.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a10a4920def78bbfff4eff8a05c51be03e42f1c3735be42d851f199144897ba"}, - {file = "coverage-7.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b8e99f06160602bc64da35158bb76c73522a4010f0649be44a4e167ff8555952"}, - {file = "coverage-7.4.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:7d360587e64d006402b7116623cebf9d48893329ef035278969fa3bbf75b697e"}, - {file = "coverage-7.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:29f3abe810930311c0b5d1a7140f6395369c3db1be68345638c33eec07535105"}, - {file = "coverage-7.4.0-cp38-cp38-win32.whl", hash = "sha256:5040148f4ec43644702e7b16ca864c5314ccb8ee0751ef617d49aa0e2d6bf4f2"}, - {file = "coverage-7.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:9864463c1c2f9cb3b5db2cf1ff475eed2f0b4285c2aaf4d357b69959941aa555"}, - {file = "coverage-7.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:936d38794044b26c99d3dd004d8af0035ac535b92090f7f2bb5aa9c8e2f5cd42"}, - {file = "coverage-7.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:799c8f873794a08cdf216aa5d0531c6a3747793b70c53f70e98259720a6fe2d7"}, - {file = "coverage-7.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e7defbb9737274023e2d7af02cac77043c86ce88a907c58f42b580a97d5bcca9"}, - {file = 
"coverage-7.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a1526d265743fb49363974b7aa8d5899ff64ee07df47dd8d3e37dcc0818f09ed"}, - {file = "coverage-7.4.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf635a52fc1ea401baf88843ae8708591aa4adff875e5c23220de43b1ccf575c"}, - {file = "coverage-7.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:756ded44f47f330666843b5781be126ab57bb57c22adbb07d83f6b519783b870"}, - {file = "coverage-7.4.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:0eb3c2f32dabe3a4aaf6441dde94f35687224dfd7eb2a7f47f3fd9428e421058"}, - {file = "coverage-7.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bfd5db349d15c08311702611f3dccbef4b4e2ec148fcc636cf8739519b4a5c0f"}, - {file = "coverage-7.4.0-cp39-cp39-win32.whl", hash = "sha256:53d7d9158ee03956e0eadac38dfa1ec8068431ef8058fe6447043db1fb40d932"}, - {file = "coverage-7.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:cfd2a8b6b0d8e66e944d47cdec2f47c48fef2ba2f2dff5a9a75757f64172857e"}, - {file = "coverage-7.4.0-pp38.pp39.pp310-none-any.whl", hash = "sha256:c530833afc4707fe48524a44844493f36d8727f04dcce91fb978c414a8556cc6"}, - {file = "coverage-7.4.0.tar.gz", hash = "sha256:707c0f58cb1712b8809ece32b68996ee1e609f71bd14615bd8f87a1293cb610e"}, + {file = "coverage-7.4.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bf54c3e089179d9d23900e3efc86d46e4431188d9a657f345410eecdd0151f50"}, + {file = "coverage-7.4.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fe6e43c8b510719b48af7db9631b5fbac910ade4bd90e6378c85ac5ac706382c"}, + {file = "coverage-7.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b98c89db1b150d851a7840142d60d01d07677a18f0f46836e691c38134ed18b"}, + {file = "coverage-7.4.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:c5f9683be6a5b19cd776ee4e2f2ffb411424819c69afab6b2db3a0a364ec6642"}, + {file = "coverage-7.4.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78cdcbf7b9cb83fe047ee09298e25b1cd1636824067166dc97ad0543b079d22f"}, + {file = "coverage-7.4.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:2599972b21911111114100d362aea9e70a88b258400672626efa2b9e2179609c"}, + {file = "coverage-7.4.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ef00d31b7569ed3cb2036f26565f1984b9fc08541731ce01012b02a4c238bf03"}, + {file = "coverage-7.4.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:20a875bfd8c282985c4720c32aa05056f77a68e6d8bbc5fe8632c5860ee0b49b"}, + {file = "coverage-7.4.2-cp310-cp310-win32.whl", hash = "sha256:b3f2b1eb229f23c82898eedfc3296137cf1f16bb145ceab3edfd17cbde273fb7"}, + {file = "coverage-7.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:7df95fdd1432a5d2675ce630fef5f239939e2b3610fe2f2b5bf21fa505256fa3"}, + {file = "coverage-7.4.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a8ddbd158e069dded57738ea69b9744525181e99974c899b39f75b2b29a624e2"}, + {file = "coverage-7.4.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:81a5fb41b0d24447a47543b749adc34d45a2cf77b48ca74e5bf3de60a7bd9edc"}, + {file = "coverage-7.4.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2412e98e70f16243be41d20836abd5f3f32edef07cbf8f407f1b6e1ceae783ac"}, + {file = "coverage-7.4.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ddb79414c15c6f03f56cc68fa06994f047cf20207c31b5dad3f6bab54a0f66ef"}, + {file = "coverage-7.4.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf89ab85027427d351f1de918aff4b43f4eb5f33aff6835ed30322a86ac29c9e"}, + {file = "coverage-7.4.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = 
"sha256:a178b7b1ac0f1530bb28d2e51f88c0bab3e5949835851a60dda80bff6052510c"}, + {file = "coverage-7.4.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:06fe398145a2e91edaf1ab4eee66149c6776c6b25b136f4a86fcbbb09512fd10"}, + {file = "coverage-7.4.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:18cac867950943fe93d6cd56a67eb7dcd2d4a781a40f4c1e25d6f1ed98721a55"}, + {file = "coverage-7.4.2-cp311-cp311-win32.whl", hash = "sha256:f72cdd2586f9a769570d4b5714a3837b3a59a53b096bb954f1811f6a0afad305"}, + {file = "coverage-7.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:d779a48fac416387dd5673fc5b2d6bd903ed903faaa3247dc1865c65eaa5a93e"}, + {file = "coverage-7.4.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:adbdfcda2469d188d79771d5696dc54fab98a16d2ef7e0875013b5f56a251047"}, + {file = "coverage-7.4.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ac4bab32f396b03ebecfcf2971668da9275b3bb5f81b3b6ba96622f4ef3f6e17"}, + {file = "coverage-7.4.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:006d220ba2e1a45f1de083d5022d4955abb0aedd78904cd5a779b955b019ec73"}, + {file = "coverage-7.4.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3733545eb294e5ad274abe131d1e7e7de4ba17a144505c12feca48803fea5f64"}, + {file = "coverage-7.4.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42a9e754aa250fe61f0f99986399cec086d7e7a01dd82fd863a20af34cbce962"}, + {file = "coverage-7.4.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:2ed37e16cf35c8d6e0b430254574b8edd242a367a1b1531bd1adc99c6a5e00fe"}, + {file = "coverage-7.4.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:b953275d4edfab6cc0ed7139fa773dfb89e81fee1569a932f6020ce7c6da0e8f"}, + {file = "coverage-7.4.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:32b4ab7e6c924f945cbae5392832e93e4ceb81483fd6dc4aa8fb1a97b9d3e0e1"}, + {file = 
"coverage-7.4.2-cp312-cp312-win32.whl", hash = "sha256:f5df76c58977bc35a49515b2fbba84a1d952ff0ec784a4070334dfbec28a2def"}, + {file = "coverage-7.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:34423abbaad70fea9d0164add189eabaea679068ebdf693baa5c02d03e7db244"}, + {file = "coverage-7.4.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5b11f9c6587668e495cc7365f85c93bed34c3a81f9f08b0920b87a89acc13469"}, + {file = "coverage-7.4.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:51593a1f05c39332f623d64d910445fdec3d2ac2d96b37ce7f331882d5678ddf"}, + {file = "coverage-7.4.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69f1665165ba2fe7614e2f0c1aed71e14d83510bf67e2ee13df467d1c08bf1e8"}, + {file = "coverage-7.4.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3c8bbb95a699c80a167478478efe5e09ad31680931ec280bf2087905e3b95ec"}, + {file = "coverage-7.4.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:175f56572f25e1e1201d2b3e07b71ca4d201bf0b9cb8fad3f1dfae6a4188de86"}, + {file = "coverage-7.4.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8562ca91e8c40864942615b1d0b12289d3e745e6b2da901d133f52f2d510a1e3"}, + {file = "coverage-7.4.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d9a1ef0f173e1a19738f154fb3644f90d0ada56fe6c9b422f992b04266c55d5a"}, + {file = "coverage-7.4.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f40ac873045db4fd98a6f40387d242bde2708a3f8167bd967ccd43ad46394ba2"}, + {file = "coverage-7.4.2-cp38-cp38-win32.whl", hash = "sha256:d1b750a8409bec61caa7824bfd64a8074b6d2d420433f64c161a8335796c7c6b"}, + {file = "coverage-7.4.2-cp38-cp38-win_amd64.whl", hash = "sha256:b4ae777bebaed89e3a7e80c4a03fac434a98a8abb5251b2a957d38fe3fd30088"}, + {file = "coverage-7.4.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3ff7f92ae5a456101ca8f48387fd3c56eb96353588e686286f50633a611afc95"}, + {file = 
"coverage-7.4.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:861d75402269ffda0b33af94694b8e0703563116b04c681b1832903fac8fd647"}, + {file = "coverage-7.4.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3507427d83fa961cbd73f11140f4a5ce84208d31756f7238d6257b2d3d868405"}, + {file = "coverage-7.4.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bf711d517e21fb5bc429f5c4308fbc430a8585ff2a43e88540264ae87871e36a"}, + {file = "coverage-7.4.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c00e54f0bd258ab25e7f731ca1d5144b0bf7bec0051abccd2bdcff65fa3262c9"}, + {file = "coverage-7.4.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f8e845d894e39fb53834da826078f6dc1a933b32b1478cf437007367efaf6f6a"}, + {file = "coverage-7.4.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:840456cb1067dc350af9080298c7c2cfdddcedc1cb1e0b30dceecdaf7be1a2d3"}, + {file = "coverage-7.4.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c11ca2df2206a4e3e4c4567f52594637392ed05d7c7fb73b4ea1c658ba560265"}, + {file = "coverage-7.4.2-cp39-cp39-win32.whl", hash = "sha256:3ff5bdb08d8938d336ce4088ca1a1e4b6c8cd3bef8bb3a4c0eb2f37406e49643"}, + {file = "coverage-7.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:ac9e95cefcf044c98d4e2c829cd0669918585755dd9a92e28a1a7012322d0a95"}, + {file = "coverage-7.4.2-pp38.pp39.pp310-none-any.whl", hash = "sha256:f593a4a90118d99014517c2679e04a4ef5aee2d81aa05c26c734d271065efcb6"}, + {file = "coverage-7.4.2.tar.gz", hash = "sha256:1a5ee18e3a8d766075ce9314ed1cb695414bae67df6a4b0805f5137d93d6f1cb"}, ] [package.dependencies] @@ -318,46 +318,15 @@ docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1 testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)"] typing = ["typing-extensions 
(>=4.8)"] -[[package]] -name = "gitdb" -version = "4.0.11" -description = "Git Object Database" -optional = false -python-versions = ">=3.7" -files = [ - {file = "gitdb-4.0.11-py3-none-any.whl", hash = "sha256:81a3407ddd2ee8df444cbacea00e2d038e40150acfa3001696fe0dcf1d3adfa4"}, - {file = "gitdb-4.0.11.tar.gz", hash = "sha256:bf5421126136d6d0af55bc1e7c1af1c397a34f5b7bd79e776cd3e89785c2b04b"}, -] - -[package.dependencies] -smmap = ">=3.0.1,<6" - -[[package]] -name = "gitpython" -version = "3.1.40" -description = "GitPython is a Python library used to interact with Git repositories" -optional = false -python-versions = ">=3.7" -files = [ - {file = "GitPython-3.1.40-py3-none-any.whl", hash = "sha256:cf14627d5a8049ffbf49915732e5eddbe8134c3bdb9d476e6182b676fc573f8a"}, - {file = "GitPython-3.1.40.tar.gz", hash = "sha256:22b126e9ffb671fdd0c129796343a02bf67bf2994b35449ffc9321aa755e18a4"}, -] - -[package.dependencies] -gitdb = ">=4.0.1,<5" - -[package.extras] -test = ["black", "coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit", "pytest", "pytest-cov", "pytest-instafail", "pytest-subtests", "pytest-sugar"] - [[package]] name = "identify" -version = "2.5.33" +version = "2.5.35" description = "File identification library for Python" optional = false python-versions = ">=3.8" files = [ - {file = "identify-2.5.33-py2.py3-none-any.whl", hash = "sha256:d40ce5fcd762817627670da8a7d8d8e65f24342d14539c59488dc603bf662e34"}, - {file = "identify-2.5.33.tar.gz", hash = "sha256:161558f9fe4559e1557e1bff323e8631f6a0e4837f7497767c1782832f16b62d"}, + {file = "identify-2.5.35-py2.py3-none-any.whl", hash = "sha256:c4de0081837b211594f8e877a6b4fad7ca32bbfc1a9307fdd61c28bfe923f13e"}, + {file = "identify-2.5.35.tar.gz", hash = "sha256:10a7ca245cfcd756a554a7288159f72ff105ad233c7c4b9c6f0f4d108f5f6791"}, ] [package.extras] @@ -492,47 +461,47 @@ setuptools = "*" [[package]] name = "numpy" -version = "1.26.3" +version = "1.26.4" description = "Fundamental package for array computing in 
Python" optional = false python-versions = ">=3.9" files = [ - {file = "numpy-1.26.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:806dd64230dbbfaca8a27faa64e2f414bf1c6622ab78cc4264f7f5f028fee3bf"}, - {file = "numpy-1.26.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:02f98011ba4ab17f46f80f7f8f1c291ee7d855fcef0a5a98db80767a468c85cd"}, - {file = "numpy-1.26.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6d45b3ec2faed4baca41c76617fcdcfa4f684ff7a151ce6fc78ad3b6e85af0a6"}, - {file = "numpy-1.26.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bdd2b45bf079d9ad90377048e2747a0c82351989a2165821f0c96831b4a2a54b"}, - {file = "numpy-1.26.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:211ddd1e94817ed2d175b60b6374120244a4dd2287f4ece45d49228b4d529178"}, - {file = "numpy-1.26.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b1240f767f69d7c4c8a29adde2310b871153df9b26b5cb2b54a561ac85146485"}, - {file = "numpy-1.26.3-cp310-cp310-win32.whl", hash = "sha256:21a9484e75ad018974a2fdaa216524d64ed4212e418e0a551a2d83403b0531d3"}, - {file = "numpy-1.26.3-cp310-cp310-win_amd64.whl", hash = "sha256:9e1591f6ae98bcfac2a4bbf9221c0b92ab49762228f38287f6eeb5f3f55905ce"}, - {file = "numpy-1.26.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b831295e5472954104ecb46cd98c08b98b49c69fdb7040483aff799a755a7374"}, - {file = "numpy-1.26.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9e87562b91f68dd8b1c39149d0323b42e0082db7ddb8e934ab4c292094d575d6"}, - {file = "numpy-1.26.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c66d6fec467e8c0f975818c1796d25c53521124b7cfb760114be0abad53a0a2"}, - {file = "numpy-1.26.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f25e2811a9c932e43943a2615e65fc487a0b6b49218899e62e426e7f0a57eeda"}, - {file = "numpy-1.26.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = 
"sha256:af36e0aa45e25c9f57bf684b1175e59ea05d9a7d3e8e87b7ae1a1da246f2767e"}, - {file = "numpy-1.26.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:51c7f1b344f302067b02e0f5b5d2daa9ed4a721cf49f070280ac202738ea7f00"}, - {file = "numpy-1.26.3-cp311-cp311-win32.whl", hash = "sha256:7ca4f24341df071877849eb2034948459ce3a07915c2734f1abb4018d9c49d7b"}, - {file = "numpy-1.26.3-cp311-cp311-win_amd64.whl", hash = "sha256:39763aee6dfdd4878032361b30b2b12593fb445ddb66bbac802e2113eb8a6ac4"}, - {file = "numpy-1.26.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a7081fd19a6d573e1a05e600c82a1c421011db7935ed0d5c483e9dd96b99cf13"}, - {file = "numpy-1.26.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:12c70ac274b32bc00c7f61b515126c9205323703abb99cd41836e8125ea0043e"}, - {file = "numpy-1.26.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f784e13e598e9594750b2ef6729bcd5a47f6cfe4a12cca13def35e06d8163e3"}, - {file = "numpy-1.26.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f24750ef94d56ce6e33e4019a8a4d68cfdb1ef661a52cdaee628a56d2437419"}, - {file = "numpy-1.26.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:77810ef29e0fb1d289d225cabb9ee6cf4d11978a00bb99f7f8ec2132a84e0166"}, - {file = "numpy-1.26.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8ed07a90f5450d99dad60d3799f9c03c6566709bd53b497eb9ccad9a55867f36"}, - {file = "numpy-1.26.3-cp312-cp312-win32.whl", hash = "sha256:f73497e8c38295aaa4741bdfa4fda1a5aedda5473074369eca10626835445511"}, - {file = "numpy-1.26.3-cp312-cp312-win_amd64.whl", hash = "sha256:da4b0c6c699a0ad73c810736303f7fbae483bcb012e38d7eb06a5e3b432c981b"}, - {file = "numpy-1.26.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1666f634cb3c80ccbd77ec97bc17337718f56d6658acf5d3b906ca03e90ce87f"}, - {file = "numpy-1.26.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:18c3319a7d39b2c6a9e3bb75aab2304ab79a811ac0168a671a62e6346c29b03f"}, - {file = 
"numpy-1.26.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b7e807d6888da0db6e7e75838444d62495e2b588b99e90dd80c3459594e857b"}, - {file = "numpy-1.26.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4d362e17bcb0011738c2d83e0a65ea8ce627057b2fdda37678f4374a382a137"}, - {file = "numpy-1.26.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b8c275f0ae90069496068c714387b4a0eba5d531aace269559ff2b43655edd58"}, - {file = "numpy-1.26.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cc0743f0302b94f397a4a65a660d4cd24267439eb16493fb3caad2e4389bccbb"}, - {file = "numpy-1.26.3-cp39-cp39-win32.whl", hash = "sha256:9bc6d1a7f8cedd519c4b7b1156d98e051b726bf160715b769106661d567b3f03"}, - {file = "numpy-1.26.3-cp39-cp39-win_amd64.whl", hash = "sha256:867e3644e208c8922a3be26fc6bbf112a035f50f0a86497f98f228c50c607bb2"}, - {file = "numpy-1.26.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3c67423b3703f8fbd90f5adaa37f85b5794d3366948efe9a5190a5f3a83fc34e"}, - {file = "numpy-1.26.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:46f47ee566d98849323f01b349d58f2557f02167ee301e5e28809a8c0e27a2d0"}, - {file = "numpy-1.26.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a8474703bffc65ca15853d5fd4d06b18138ae90c17c8d12169968e998e448bb5"}, - {file = "numpy-1.26.3.tar.gz", hash = "sha256:697df43e2b6310ecc9d95f05d5ef20eacc09c7c4ecc9da3f235d39e71b7da1e4"}, + {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"}, + {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"}, + {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d209d8969599b27ad20994c8e41936ee0964e6da07478d6c35016bc386b66ad4"}, + {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash 
= "sha256:ffa75af20b44f8dba823498024771d5ac50620e6915abac414251bd971b4529f"}, + {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:62b8e4b1e28009ef2846b4c7852046736bab361f7aeadeb6a5b89ebec3c7055a"}, + {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a4abb4f9001ad2858e7ac189089c42178fcce737e4169dc61321660f1a96c7d2"}, + {file = "numpy-1.26.4-cp310-cp310-win32.whl", hash = "sha256:bfe25acf8b437eb2a8b2d49d443800a5f18508cd811fea3181723922a8a82b07"}, + {file = "numpy-1.26.4-cp310-cp310-win_amd64.whl", hash = "sha256:b97fe8060236edf3662adfc2c633f56a08ae30560c56310562cb4f95500022d5"}, + {file = "numpy-1.26.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c66707fabe114439db9068ee468c26bbdf909cac0fb58686a42a24de1760c71"}, + {file = "numpy-1.26.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:edd8b5fe47dab091176d21bb6de568acdd906d1887a4584a15a9a96a1dca06ef"}, + {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ab55401287bfec946ced39700c053796e7cc0e3acbef09993a9ad2adba6ca6e"}, + {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:666dbfb6ec68962c033a450943ded891bed2d54e6755e35e5835d63f4f6931d5"}, + {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:96ff0b2ad353d8f990b63294c8986f1ec3cb19d749234014f4e7eb0112ceba5a"}, + {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:60dedbb91afcbfdc9bc0b1f3f402804070deed7392c23eb7a7f07fa857868e8a"}, + {file = "numpy-1.26.4-cp311-cp311-win32.whl", hash = "sha256:1af303d6b2210eb850fcf03064d364652b7120803a0b872f5211f5234b399f20"}, + {file = "numpy-1.26.4-cp311-cp311-win_amd64.whl", hash = "sha256:cd25bcecc4974d09257ffcd1f098ee778f7834c3ad767fe5db785be9a4aa9cb2"}, + {file = "numpy-1.26.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218"}, + {file = 
"numpy-1.26.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b"}, + {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b"}, + {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed"}, + {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a"}, + {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0"}, + {file = "numpy-1.26.4-cp312-cp312-win32.whl", hash = "sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110"}, + {file = "numpy-1.26.4-cp312-cp312-win_amd64.whl", hash = "sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818"}, + {file = "numpy-1.26.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7349ab0fa0c429c82442a27a9673fc802ffdb7c7775fad780226cb234965e53c"}, + {file = "numpy-1.26.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:52b8b60467cd7dd1e9ed082188b4e6bb35aa5cdd01777621a1658910745b90be"}, + {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5241e0a80d808d70546c697135da2c613f30e28251ff8307eb72ba696945764"}, + {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f870204a840a60da0b12273ef34f7051e98c3b5961b61b0c2c1be6dfd64fbcd3"}, + {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:679b0076f67ecc0138fd2ede3a8fd196dddc2ad3254069bcb9faf9a79b1cebcd"}, + {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:47711010ad8555514b434df65f7d7b076bb8261df1ca9bb78f53d3b2db02e95c"}, + {file = "numpy-1.26.4-cp39-cp39-win32.whl", hash = 
"sha256:a354325ee03388678242a4d7ebcd08b5c727033fcff3b2f536aea978e15ee9e6"}, + {file = "numpy-1.26.4-cp39-cp39-win_amd64.whl", hash = "sha256:3373d5d70a5fe74a2c1bb6d2cfd9609ecf686d47a2d7b1d37a8f3b6bf6003aea"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:afedb719a9dcfc7eaf2287b839d8198e06dcd4cb5d276a3df279231138e83d30"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95a7476c59002f2f6c590b9b7b998306fba6a5aa646b1e22ddfeaf8f78c3a29c"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7e50d0a0cc3189f9cb0aeb3a6a6af18c16f59f004b866cd2be1c14b36134a4a0"}, + {file = "numpy-1.26.4.tar.gz", hash = "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010"}, ] [[package]] @@ -559,28 +528,28 @@ files = [ [[package]] name = "platformdirs" -version = "4.1.0" +version = "4.2.0" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." 
optional = false python-versions = ">=3.8" files = [ - {file = "platformdirs-4.1.0-py3-none-any.whl", hash = "sha256:11c8f37bcca40db96d8144522d925583bdb7a31f7b0e37e3ed4318400a8e2380"}, - {file = "platformdirs-4.1.0.tar.gz", hash = "sha256:906d548203468492d432bcb294d4bc2fff751bf84971fbb2c10918cc206ee420"}, + {file = "platformdirs-4.2.0-py3-none-any.whl", hash = "sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068"}, + {file = "platformdirs-4.2.0.tar.gz", hash = "sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768"}, ] [package.extras] -docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.1)", "sphinx-autodoc-typehints (>=1.24)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)"] +docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"] [[package]] name = "pluggy" -version = "1.3.0" +version = "1.4.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" files = [ - {file = "pluggy-1.3.0-py3-none-any.whl", hash = "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7"}, - {file = "pluggy-1.3.0.tar.gz", hash = "sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12"}, + {file = "pluggy-1.4.0-py3-none-any.whl", hash = "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981"}, + {file = "pluggy-1.4.0.tar.gz", hash = "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be"}, ] [package.extras] @@ -629,18 +598,18 @@ files = [ [[package]] name = "pydantic" -version = "2.5.3" +version = "2.6.2" description = "Data validation using Python type hints" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = 
"pydantic-2.5.3-py3-none-any.whl", hash = "sha256:d0caf5954bee831b6bfe7e338c32b9e30c85dfe080c843680783ac2b631673b4"}, - {file = "pydantic-2.5.3.tar.gz", hash = "sha256:b3ef57c62535b0941697cce638c08900d87fcb67e29cfa99e8a68f747f393f7a"}, + {file = "pydantic-2.6.2-py3-none-any.whl", hash = "sha256:37a5432e54b12fecaa1049c5195f3d860a10e01bdfd24f1840ef14bd0d3aeab3"}, + {file = "pydantic-2.6.2.tar.gz", hash = "sha256:a09be1c3d28f3abe37f8a78af58284b236a92ce520105ddc91a6d29ea1176ba7"}, ] [package.dependencies] annotated-types = ">=0.4.0" -pydantic-core = "2.14.6" +pydantic-core = "2.16.3" typing-extensions = ">=4.6.1" [package.extras] @@ -648,116 +617,90 @@ email = ["email-validator (>=2.0.0)"] [[package]] name = "pydantic-core" -version = "2.14.6" +version = "2.16.3" description = "" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.14.6-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:72f9a942d739f09cd42fffe5dc759928217649f070056f03c70df14f5770acf9"}, - {file = "pydantic_core-2.14.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6a31d98c0d69776c2576dda4b77b8e0c69ad08e8b539c25c7d0ca0dc19a50d6c"}, - {file = "pydantic_core-2.14.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5aa90562bc079c6c290f0512b21768967f9968e4cfea84ea4ff5af5d917016e4"}, - {file = "pydantic_core-2.14.6-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:370ffecb5316ed23b667d99ce4debe53ea664b99cc37bfa2af47bc769056d534"}, - {file = "pydantic_core-2.14.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f85f3843bdb1fe80e8c206fe6eed7a1caeae897e496542cee499c374a85c6e08"}, - {file = "pydantic_core-2.14.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9862bf828112e19685b76ca499b379338fd4c5c269d897e218b2ae8fcb80139d"}, - {file = "pydantic_core-2.14.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:036137b5ad0cb0004c75b579445a1efccd072387a36c7f217bb8efd1afbe5245"}, - {file = "pydantic_core-2.14.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:92879bce89f91f4b2416eba4429c7b5ca22c45ef4a499c39f0c5c69257522c7c"}, - {file = "pydantic_core-2.14.6-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0c08de15d50fa190d577e8591f0329a643eeaed696d7771760295998aca6bc66"}, - {file = "pydantic_core-2.14.6-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:36099c69f6b14fc2c49d7996cbf4f87ec4f0e66d1c74aa05228583225a07b590"}, - {file = "pydantic_core-2.14.6-cp310-none-win32.whl", hash = "sha256:7be719e4d2ae6c314f72844ba9d69e38dff342bc360379f7c8537c48e23034b7"}, - {file = "pydantic_core-2.14.6-cp310-none-win_amd64.whl", hash = "sha256:36fa402dcdc8ea7f1b0ddcf0df4254cc6b2e08f8cd80e7010d4c4ae6e86b2a87"}, - {file = "pydantic_core-2.14.6-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:dea7fcd62915fb150cdc373212141a30037e11b761fbced340e9db3379b892d4"}, - {file = "pydantic_core-2.14.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ffff855100bc066ff2cd3aa4a60bc9534661816b110f0243e59503ec2df38421"}, - {file = "pydantic_core-2.14.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b027c86c66b8627eb90e57aee1f526df77dc6d8b354ec498be9a757d513b92b"}, - {file = "pydantic_core-2.14.6-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:00b1087dabcee0b0ffd104f9f53d7d3eaddfaa314cdd6726143af6bc713aa27e"}, - {file = "pydantic_core-2.14.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:75ec284328b60a4e91010c1acade0c30584f28a1f345bc8f72fe8b9e46ec6a96"}, - {file = "pydantic_core-2.14.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e1f4744eea1501404b20b0ac059ff7e3f96a97d3e3f48ce27a139e053bb370b"}, - {file = "pydantic_core-2.14.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:b2602177668f89b38b9f84b7b3435d0a72511ddef45dc14446811759b82235a1"}, - {file = "pydantic_core-2.14.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6c8edaea3089bf908dd27da8f5d9e395c5b4dc092dbcce9b65e7156099b4b937"}, - {file = "pydantic_core-2.14.6-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:478e9e7b360dfec451daafe286998d4a1eeaecf6d69c427b834ae771cad4b622"}, - {file = "pydantic_core-2.14.6-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b6ca36c12a5120bad343eef193cc0122928c5c7466121da7c20f41160ba00ba2"}, - {file = "pydantic_core-2.14.6-cp311-none-win32.whl", hash = "sha256:2b8719037e570639e6b665a4050add43134d80b687288ba3ade18b22bbb29dd2"}, - {file = "pydantic_core-2.14.6-cp311-none-win_amd64.whl", hash = "sha256:78ee52ecc088c61cce32b2d30a826f929e1708f7b9247dc3b921aec367dc1b23"}, - {file = "pydantic_core-2.14.6-cp311-none-win_arm64.whl", hash = "sha256:a19b794f8fe6569472ff77602437ec4430f9b2b9ec7a1105cfd2232f9ba355e6"}, - {file = "pydantic_core-2.14.6-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:667aa2eac9cd0700af1ddb38b7b1ef246d8cf94c85637cbb03d7757ca4c3fdec"}, - {file = "pydantic_core-2.14.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cdee837710ef6b56ebd20245b83799fce40b265b3b406e51e8ccc5b85b9099b7"}, - {file = "pydantic_core-2.14.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c5bcf3414367e29f83fd66f7de64509a8fd2368b1edf4351e862910727d3e51"}, - {file = "pydantic_core-2.14.6-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:26a92ae76f75d1915806b77cf459811e772d8f71fd1e4339c99750f0e7f6324f"}, - {file = "pydantic_core-2.14.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a983cca5ed1dd9a35e9e42ebf9f278d344603bfcb174ff99a5815f953925140a"}, - {file = "pydantic_core-2.14.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cb92f9061657287eded380d7dc455bbf115430b3aa4741bdc662d02977e7d0af"}, - {file = 
"pydantic_core-2.14.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4ace1e220b078c8e48e82c081e35002038657e4b37d403ce940fa679e57113b"}, - {file = "pydantic_core-2.14.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ef633add81832f4b56d3b4c9408b43d530dfca29e68fb1b797dcb861a2c734cd"}, - {file = "pydantic_core-2.14.6-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7e90d6cc4aad2cc1f5e16ed56e46cebf4877c62403a311af20459c15da76fd91"}, - {file = "pydantic_core-2.14.6-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e8a5ac97ea521d7bde7621d86c30e86b798cdecd985723c4ed737a2aa9e77d0c"}, - {file = "pydantic_core-2.14.6-cp312-none-win32.whl", hash = "sha256:f27207e8ca3e5e021e2402ba942e5b4c629718e665c81b8b306f3c8b1ddbb786"}, - {file = "pydantic_core-2.14.6-cp312-none-win_amd64.whl", hash = "sha256:b3e5fe4538001bb82e2295b8d2a39356a84694c97cb73a566dc36328b9f83b40"}, - {file = "pydantic_core-2.14.6-cp312-none-win_arm64.whl", hash = "sha256:64634ccf9d671c6be242a664a33c4acf12882670b09b3f163cd00a24cffbd74e"}, - {file = "pydantic_core-2.14.6-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:24368e31be2c88bd69340fbfe741b405302993242ccb476c5c3ff48aeee1afe0"}, - {file = "pydantic_core-2.14.6-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:e33b0834f1cf779aa839975f9d8755a7c2420510c0fa1e9fa0497de77cd35d2c"}, - {file = "pydantic_core-2.14.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6af4b3f52cc65f8a0bc8b1cd9676f8c21ef3e9132f21fed250f6958bd7223bed"}, - {file = "pydantic_core-2.14.6-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d15687d7d7f40333bd8266f3814c591c2e2cd263fa2116e314f60d82086e353a"}, - {file = "pydantic_core-2.14.6-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:095b707bb287bfd534044166ab767bec70a9bba3175dcdc3371782175c14e43c"}, - {file = "pydantic_core-2.14.6-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:94fc0e6621e07d1e91c44e016cc0b189b48db053061cc22d6298a611de8071bb"}, - {file = "pydantic_core-2.14.6-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ce830e480f6774608dedfd4a90c42aac4a7af0a711f1b52f807130c2e434c06"}, - {file = "pydantic_core-2.14.6-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a306cdd2ad3a7d795d8e617a58c3a2ed0f76c8496fb7621b6cd514eb1532cae8"}, - {file = "pydantic_core-2.14.6-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:2f5fa187bde8524b1e37ba894db13aadd64faa884657473b03a019f625cee9a8"}, - {file = "pydantic_core-2.14.6-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:438027a975cc213a47c5d70672e0d29776082155cfae540c4e225716586be75e"}, - {file = "pydantic_core-2.14.6-cp37-none-win32.whl", hash = "sha256:f96ae96a060a8072ceff4cfde89d261837b4294a4f28b84a28765470d502ccc6"}, - {file = "pydantic_core-2.14.6-cp37-none-win_amd64.whl", hash = "sha256:e646c0e282e960345314f42f2cea5e0b5f56938c093541ea6dbf11aec2862391"}, - {file = "pydantic_core-2.14.6-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:db453f2da3f59a348f514cfbfeb042393b68720787bbef2b4c6068ea362c8149"}, - {file = "pydantic_core-2.14.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3860c62057acd95cc84044e758e47b18dcd8871a328ebc8ccdefd18b0d26a21b"}, - {file = "pydantic_core-2.14.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:36026d8f99c58d7044413e1b819a67ca0e0b8ebe0f25e775e6c3d1fabb3c38fb"}, - {file = "pydantic_core-2.14.6-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8ed1af8692bd8d2a29d702f1a2e6065416d76897d726e45a1775b1444f5928a7"}, - {file = "pydantic_core-2.14.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:314ccc4264ce7d854941231cf71b592e30d8d368a71e50197c905874feacc8a8"}, - {file = "pydantic_core-2.14.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:982487f8931067a32e72d40ab6b47b1628a9c5d344be7f1a4e668fb462d2da42"}, 
- {file = "pydantic_core-2.14.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2dbe357bc4ddda078f79d2a36fc1dd0494a7f2fad83a0a684465b6f24b46fe80"}, - {file = "pydantic_core-2.14.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2f6ffc6701a0eb28648c845f4945a194dc7ab3c651f535b81793251e1185ac3d"}, - {file = "pydantic_core-2.14.6-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:7f5025db12fc6de7bc1104d826d5aee1d172f9ba6ca936bf6474c2148ac336c1"}, - {file = "pydantic_core-2.14.6-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:dab03ed811ed1c71d700ed08bde8431cf429bbe59e423394f0f4055f1ca0ea60"}, - {file = "pydantic_core-2.14.6-cp38-none-win32.whl", hash = "sha256:dfcbebdb3c4b6f739a91769aea5ed615023f3c88cb70df812849aef634c25fbe"}, - {file = "pydantic_core-2.14.6-cp38-none-win_amd64.whl", hash = "sha256:99b14dbea2fdb563d8b5a57c9badfcd72083f6006caf8e126b491519c7d64ca8"}, - {file = "pydantic_core-2.14.6-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:4ce8299b481bcb68e5c82002b96e411796b844d72b3e92a3fbedfe8e19813eab"}, - {file = "pydantic_core-2.14.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b9a9d92f10772d2a181b5ca339dee066ab7d1c9a34ae2421b2a52556e719756f"}, - {file = "pydantic_core-2.14.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd9e98b408384989ea4ab60206b8e100d8687da18b5c813c11e92fd8212a98e0"}, - {file = "pydantic_core-2.14.6-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4f86f1f318e56f5cbb282fe61eb84767aee743ebe32c7c0834690ebea50c0a6b"}, - {file = "pydantic_core-2.14.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86ce5fcfc3accf3a07a729779d0b86c5d0309a4764c897d86c11089be61da160"}, - {file = "pydantic_core-2.14.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dcf1978be02153c6a31692d4fbcc2a3f1db9da36039ead23173bc256ee3b91b"}, - {file = 
"pydantic_core-2.14.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eedf97be7bc3dbc8addcef4142f4b4164066df0c6f36397ae4aaed3eb187d8ab"}, - {file = "pydantic_core-2.14.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d5f916acf8afbcab6bacbb376ba7dc61f845367901ecd5e328fc4d4aef2fcab0"}, - {file = "pydantic_core-2.14.6-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:8a14c192c1d724c3acbfb3f10a958c55a2638391319ce8078cb36c02283959b9"}, - {file = "pydantic_core-2.14.6-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0348b1dc6b76041516e8a854ff95b21c55f5a411c3297d2ca52f5528e49d8411"}, - {file = "pydantic_core-2.14.6-cp39-none-win32.whl", hash = "sha256:de2a0645a923ba57c5527497daf8ec5df69c6eadf869e9cd46e86349146e5975"}, - {file = "pydantic_core-2.14.6-cp39-none-win_amd64.whl", hash = "sha256:aca48506a9c20f68ee61c87f2008f81f8ee99f8d7f0104bff3c47e2d148f89d9"}, - {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:d5c28525c19f5bb1e09511669bb57353d22b94cf8b65f3a8d141c389a55dec95"}, - {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:78d0768ee59baa3de0f4adac9e3748b4b1fffc52143caebddfd5ea2961595277"}, - {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b93785eadaef932e4fe9c6e12ba67beb1b3f1e5495631419c784ab87e975670"}, - {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a874f21f87c485310944b2b2734cd6d318765bcbb7515eead33af9641816506e"}, - {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b89f4477d915ea43b4ceea6756f63f0288941b6443a2b28c69004fe07fde0d0d"}, - {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:172de779e2a153d36ee690dbc49c6db568d7b33b18dc56b69a7514aecbcf380d"}, - {file = 
"pydantic_core-2.14.6-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:dfcebb950aa7e667ec226a442722134539e77c575f6cfaa423f24371bb8d2e94"}, - {file = "pydantic_core-2.14.6-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:55a23dcd98c858c0db44fc5c04fc7ed81c4b4d33c653a7c45ddaebf6563a2f66"}, - {file = "pydantic_core-2.14.6-pp37-pypy37_pp73-macosx_10_7_x86_64.whl", hash = "sha256:4241204e4b36ab5ae466ecec5c4c16527a054c69f99bba20f6f75232a6a534e2"}, - {file = "pydantic_core-2.14.6-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e574de99d735b3fc8364cba9912c2bec2da78775eba95cbb225ef7dda6acea24"}, - {file = "pydantic_core-2.14.6-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1302a54f87b5cd8528e4d6d1bf2133b6aa7c6122ff8e9dc5220fbc1e07bffebd"}, - {file = "pydantic_core-2.14.6-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f8e81e4b55930e5ffab4a68db1af431629cf2e4066dbdbfef65348b8ab804ea8"}, - {file = "pydantic_core-2.14.6-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:c99462ffc538717b3e60151dfaf91125f637e801f5ab008f81c402f1dff0cd0f"}, - {file = "pydantic_core-2.14.6-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e4cf2d5829f6963a5483ec01578ee76d329eb5caf330ecd05b3edd697e7d768a"}, - {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:cf10b7d58ae4a1f07fccbf4a0a956d705356fea05fb4c70608bb6fa81d103cda"}, - {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:399ac0891c284fa8eb998bcfa323f2234858f5d2efca3950ae58c8f88830f145"}, - {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c6a5c79b28003543db3ba67d1df336f253a87d3112dac3a51b94f7d48e4c0e1"}, - {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:599c87d79cab2a6a2a9df4aefe0455e61e7d2aeede2f8577c1b7c0aec643ee8e"}, - {file 
= "pydantic_core-2.14.6-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43e166ad47ba900f2542a80d83f9fc65fe99eb63ceec4debec160ae729824052"}, - {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3a0b5db001b98e1c649dd55afa928e75aa4087e587b9524a4992316fa23c9fba"}, - {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:747265448cb57a9f37572a488a57d873fd96bf51e5bb7edb52cfb37124516da4"}, - {file = "pydantic_core-2.14.6-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:7ebe3416785f65c28f4f9441e916bfc8a54179c8dea73c23023f7086fa601c5d"}, - {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:86c963186ca5e50d5c8287b1d1c9d3f8f024cbe343d048c5bd282aec2d8641f2"}, - {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:e0641b506486f0b4cd1500a2a65740243e8670a2549bb02bc4556a83af84ae03"}, - {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71d72ca5eaaa8d38c8df16b7deb1a2da4f650c41b58bb142f3fb75d5ad4a611f"}, - {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27e524624eace5c59af499cd97dc18bb201dc6a7a2da24bfc66ef151c69a5f2a"}, - {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a3dde6cac75e0b0902778978d3b1646ca9f438654395a362cb21d9ad34b24acf"}, - {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:00646784f6cd993b1e1c0e7b0fdcbccc375d539db95555477771c27555e3c556"}, - {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:23598acb8ccaa3d1d875ef3b35cb6376535095e9405d91a3d57a8c7db5d29341"}, - {file = "pydantic_core-2.14.6-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7f41533d7e3cf9520065f610b41ac1c76bc2161415955fbcead4981b22c7611e"}, - {file = "pydantic_core-2.14.6.tar.gz", hash = 
"sha256:1fd0c1d395372843fba13a51c28e3bb9d59bd7aebfeb17358ffaaa1e4dbbe948"}, + {file = "pydantic_core-2.16.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:75b81e678d1c1ede0785c7f46690621e4c6e63ccd9192af1f0bd9d504bbb6bf4"}, + {file = "pydantic_core-2.16.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9c865a7ee6f93783bd5d781af5a4c43dadc37053a5b42f7d18dc019f8c9d2bd1"}, + {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:162e498303d2b1c036b957a1278fa0899d02b2842f1ff901b6395104c5554a45"}, + {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2f583bd01bbfbff4eaee0868e6fc607efdfcc2b03c1c766b06a707abbc856187"}, + {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b926dd38db1519ed3043a4de50214e0d600d404099c3392f098a7f9d75029ff8"}, + {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:716b542728d4c742353448765aa7cdaa519a7b82f9564130e2b3f6766018c9ec"}, + {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc4ad7f7ee1a13d9cb49d8198cd7d7e3aa93e425f371a68235f784e99741561f"}, + {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bd87f48924f360e5d1c5f770d6155ce0e7d83f7b4e10c2f9ec001c73cf475c99"}, + {file = "pydantic_core-2.16.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0df446663464884297c793874573549229f9eca73b59360878f382a0fc085979"}, + {file = "pydantic_core-2.16.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4df8a199d9f6afc5ae9a65f8f95ee52cae389a8c6b20163762bde0426275b7db"}, + {file = "pydantic_core-2.16.3-cp310-none-win32.whl", hash = "sha256:456855f57b413f077dff513a5a28ed838dbbb15082ba00f80750377eed23d132"}, + {file = "pydantic_core-2.16.3-cp310-none-win_amd64.whl", hash = 
"sha256:732da3243e1b8d3eab8c6ae23ae6a58548849d2e4a4e03a1924c8ddf71a387cb"}, + {file = "pydantic_core-2.16.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:519ae0312616026bf4cedc0fe459e982734f3ca82ee8c7246c19b650b60a5ee4"}, + {file = "pydantic_core-2.16.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b3992a322a5617ded0a9f23fd06dbc1e4bd7cf39bc4ccf344b10f80af58beacd"}, + {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d62da299c6ecb04df729e4b5c52dc0d53f4f8430b4492b93aa8de1f541c4aac"}, + {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2acca2be4bb2f2147ada8cac612f8a98fc09f41c89f87add7256ad27332c2fda"}, + {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1b662180108c55dfbf1280d865b2d116633d436cfc0bba82323554873967b340"}, + {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e7c6ed0dc9d8e65f24f5824291550139fe6f37fac03788d4580da0d33bc00c97"}, + {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6b1bb0827f56654b4437955555dc3aeeebeddc47c2d7ed575477f082622c49e"}, + {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e56f8186d6210ac7ece503193ec84104da7ceb98f68ce18c07282fcc2452e76f"}, + {file = "pydantic_core-2.16.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:936e5db01dd49476fa8f4383c259b8b1303d5dd5fb34c97de194560698cc2c5e"}, + {file = "pydantic_core-2.16.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:33809aebac276089b78db106ee692bdc9044710e26f24a9a2eaa35a0f9fa70ba"}, + {file = "pydantic_core-2.16.3-cp311-none-win32.whl", hash = "sha256:ded1c35f15c9dea16ead9bffcde9bb5c7c031bff076355dc58dcb1cb436c4721"}, + {file = "pydantic_core-2.16.3-cp311-none-win_amd64.whl", hash = 
"sha256:d89ca19cdd0dd5f31606a9329e309d4fcbb3df860960acec32630297d61820df"}, + {file = "pydantic_core-2.16.3-cp311-none-win_arm64.whl", hash = "sha256:6162f8d2dc27ba21027f261e4fa26f8bcb3cf9784b7f9499466a311ac284b5b9"}, + {file = "pydantic_core-2.16.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:0f56ae86b60ea987ae8bcd6654a887238fd53d1384f9b222ac457070b7ac4cff"}, + {file = "pydantic_core-2.16.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c9bd22a2a639e26171068f8ebb5400ce2c1bc7d17959f60a3b753ae13c632975"}, + {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4204e773b4b408062960e65468d5346bdfe139247ee5f1ca2a378983e11388a2"}, + {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f651dd19363c632f4abe3480a7c87a9773be27cfe1341aef06e8759599454120"}, + {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aaf09e615a0bf98d406657e0008e4a8701b11481840be7d31755dc9f97c44053"}, + {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8e47755d8152c1ab5b55928ab422a76e2e7b22b5ed8e90a7d584268dd49e9c6b"}, + {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:500960cb3a0543a724a81ba859da816e8cf01b0e6aaeedf2c3775d12ee49cade"}, + {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cf6204fe865da605285c34cf1172879d0314ff267b1c35ff59de7154f35fdc2e"}, + {file = "pydantic_core-2.16.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d33dd21f572545649f90c38c227cc8631268ba25c460b5569abebdd0ec5974ca"}, + {file = "pydantic_core-2.16.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:49d5d58abd4b83fb8ce763be7794d09b2f50f10aa65c0f0c1696c677edeb7cbf"}, + {file = "pydantic_core-2.16.3-cp312-none-win32.whl", hash = 
"sha256:f53aace168a2a10582e570b7736cc5bef12cae9cf21775e3eafac597e8551fbe"}, + {file = "pydantic_core-2.16.3-cp312-none-win_amd64.whl", hash = "sha256:0d32576b1de5a30d9a97f300cc6a3f4694c428d956adbc7e6e2f9cad279e45ed"}, + {file = "pydantic_core-2.16.3-cp312-none-win_arm64.whl", hash = "sha256:ec08be75bb268473677edb83ba71e7e74b43c008e4a7b1907c6d57e940bf34b6"}, + {file = "pydantic_core-2.16.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:b1f6f5938d63c6139860f044e2538baeee6f0b251a1816e7adb6cbce106a1f01"}, + {file = "pydantic_core-2.16.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2a1ef6a36fdbf71538142ed604ad19b82f67b05749512e47f247a6ddd06afdc7"}, + {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:704d35ecc7e9c31d48926150afada60401c55efa3b46cd1ded5a01bdffaf1d48"}, + {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d937653a696465677ed583124b94a4b2d79f5e30b2c46115a68e482c6a591c8a"}, + {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c9803edf8e29bd825f43481f19c37f50d2b01899448273b3a7758441b512acf8"}, + {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:72282ad4892a9fb2da25defeac8c2e84352c108705c972db82ab121d15f14e6d"}, + {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f752826b5b8361193df55afcdf8ca6a57d0232653494ba473630a83ba50d8c9"}, + {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4384a8f68ddb31a0b0c3deae88765f5868a1b9148939c3f4121233314ad5532c"}, + {file = "pydantic_core-2.16.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a4b2bf78342c40b3dc830880106f54328928ff03e357935ad26c7128bbd66ce8"}, + {file = "pydantic_core-2.16.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = 
"sha256:13dcc4802961b5f843a9385fc821a0b0135e8c07fc3d9949fd49627c1a5e6ae5"}, + {file = "pydantic_core-2.16.3-cp38-none-win32.whl", hash = "sha256:e3e70c94a0c3841e6aa831edab1619ad5c511199be94d0c11ba75fe06efe107a"}, + {file = "pydantic_core-2.16.3-cp38-none-win_amd64.whl", hash = "sha256:ecdf6bf5f578615f2e985a5e1f6572e23aa632c4bd1dc67f8f406d445ac115ed"}, + {file = "pydantic_core-2.16.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:bda1ee3e08252b8d41fa5537413ffdddd58fa73107171a126d3b9ff001b9b820"}, + {file = "pydantic_core-2.16.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:21b888c973e4f26b7a96491c0965a8a312e13be108022ee510248fe379a5fa23"}, + {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be0ec334369316fa73448cc8c982c01e5d2a81c95969d58b8f6e272884df0074"}, + {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b5b6079cc452a7c53dd378c6f881ac528246b3ac9aae0f8eef98498a75657805"}, + {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ee8d5f878dccb6d499ba4d30d757111847b6849ae07acdd1205fffa1fc1253c"}, + {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7233d65d9d651242a68801159763d09e9ec96e8a158dbf118dc090cd77a104c9"}, + {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c6119dc90483a5cb50a1306adb8d52c66e447da88ea44f323e0ae1a5fcb14256"}, + {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:578114bc803a4c1ff9946d977c221e4376620a46cf78da267d946397dc9514a8"}, + {file = "pydantic_core-2.16.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d8f99b147ff3fcf6b3cc60cb0c39ea443884d5559a30b1481e92495f2310ff2b"}, + {file = "pydantic_core-2.16.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4ac6b4ce1e7283d715c4b729d8f9dab9627586dafce81d9eaa009dd7f25dd972"}, + 
{file = "pydantic_core-2.16.3-cp39-none-win32.whl", hash = "sha256:e7774b570e61cb998490c5235740d475413a1f6de823169b4cf94e2fe9e9f6b2"}, + {file = "pydantic_core-2.16.3-cp39-none-win_amd64.whl", hash = "sha256:9091632a25b8b87b9a605ec0e61f241c456e9248bfdcf7abdf344fdb169c81cf"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:36fa178aacbc277bc6b62a2c3da95226520da4f4e9e206fdf076484363895d2c"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:dcca5d2bf65c6fb591fff92da03f94cd4f315972f97c21975398bd4bd046854a"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a72fb9963cba4cd5793854fd12f4cfee731e86df140f59ff52a49b3552db241"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b60cc1a081f80a2105a59385b92d82278b15d80ebb3adb200542ae165cd7d183"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cbcc558401de90a746d02ef330c528f2e668c83350f045833543cd57ecead1ad"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:fee427241c2d9fb7192b658190f9f5fd6dfe41e02f3c1489d2ec1e6a5ab1e04a"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f4cb85f693044e0f71f394ff76c98ddc1bc0953e48c061725e540396d5c8a2e1"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b29eeb887aa931c2fcef5aa515d9d176d25006794610c264ddc114c053bf96fe"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a425479ee40ff021f8216c9d07a6a3b54b31c8267c6e17aa88b70d7ebd0e5e5b"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:5c5cbc703168d1b7a838668998308018a2718c2130595e8e190220238addc96f"}, + {file = 
"pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99b6add4c0b39a513d323d3b93bc173dac663c27b99860dd5bf491b240d26137"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75f76ee558751746d6a38f89d60b6228fa174e5172d143886af0f85aa306fd89"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:00ee1c97b5364b84cb0bd82e9bbf645d5e2871fb8c58059d158412fee2d33d8a"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:287073c66748f624be4cef893ef9174e3eb88fe0b8a78dc22e88eca4bc357ca6"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ed25e1835c00a332cb10c683cd39da96a719ab1dfc08427d476bce41b92531fc"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:86b3d0033580bd6bbe07590152007275bd7af95f98eaa5bd36f3da219dcd93da"}, + {file = "pydantic_core-2.16.3.tar.gz", hash = "sha256:1cac689f80a3abab2d3c0048b29eea5751114054f032a941a32de4c852c59cad"}, ] [package.dependencies] @@ -765,13 +708,13 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" [[package]] name = "pydoclint" -version = "0.3.8" +version = "0.3.10" description = "A Python docstring linter that checks arguments, returns, yields, and raises sections" optional = false python-versions = ">=3.8" files = [ - {file = "pydoclint-0.3.8-py2.py3-none-any.whl", hash = "sha256:8e5e020071bb64056fd3f1d68f3b1162ffeb8a3fd6424f73fef7272dac62c166"}, - {file = "pydoclint-0.3.8.tar.gz", hash = "sha256:5a9686a5fb410343e998402686b87cc07df647ea3ab92528c0b0cf8505584e44"}, + {file = "pydoclint-0.3.10-py2.py3-none-any.whl", hash = "sha256:aef97818334503693f5e291580b71432f39d1688eabdd4aeb3df0367472af39c"}, + {file = "pydoclint-0.3.10.tar.gz", hash = "sha256:d078e521939e222f605e27b409383c9fc4ce64d805ca224612cdfb1040054e00"}, ] [package.dependencies] @@ -908,13 +851,13 @@ 
pytest-metadata = "*" [[package]] name = "pytest-metadata" -version = "3.0.0" +version = "3.1.1" description = "pytest plugin for test session metadata" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "pytest_metadata-3.0.0-py3-none-any.whl", hash = "sha256:a17b1e40080401dc23177599208c52228df463db191c1a573ccdffacd885e190"}, - {file = "pytest_metadata-3.0.0.tar.gz", hash = "sha256:769a9c65d2884bd583bc626b0ace77ad15dbe02dd91a9106d47fd46d9c2569ca"}, + {file = "pytest_metadata-3.1.1-py3-none-any.whl", hash = "sha256:c8e0844db684ee1c798cfa38908d20d67d0463ecb6137c72e91f418558dd5f4b"}, + {file = "pytest_metadata-3.1.1.tar.gz", hash = "sha256:d2a29b0355fbc03f168aa96d41ff88b1a3b44a3b02acbe491801c98a048017c8"}, ] [package.dependencies] @@ -1087,13 +1030,13 @@ jupyter = ["ipywidgets (>=7.5.1,<9)"] [[package]] name = "ruamel-yaml" -version = "0.18.5" +version = "0.18.6" description = "ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order" optional = false python-versions = ">=3.7" files = [ - {file = "ruamel.yaml-0.18.5-py3-none-any.whl", hash = "sha256:a013ac02f99a69cdd6277d9664689eb1acba07069f912823177c5eced21a6ada"}, - {file = "ruamel.yaml-0.18.5.tar.gz", hash = "sha256:61917e3a35a569c1133a8f772e1226961bf5a1198bea7e23f06a0841dea1ab0e"}, + {file = "ruamel.yaml-0.18.6-py3-none-any.whl", hash = "sha256:57b53ba33def16c4f3d807c0ccbc00f8a6081827e81ba2491691b76882d0c636"}, + {file = "ruamel.yaml-0.18.6.tar.gz", hash = "sha256:8b27e6a217e786c6fbe5634d8f3f11bc63e0f80f6a5890f28863d9c45aac311b"}, ] [package.dependencies] @@ -1164,28 +1107,28 @@ files = [ [[package]] name = "ruff" -version = "0.1.11" +version = "0.2.2" description = "An extremely fast Python linter and code formatter, written in Rust." 
optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.1.11-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:a7f772696b4cdc0a3b2e527fc3c7ccc41cdcb98f5c80fdd4f2b8c50eb1458196"}, - {file = "ruff-0.1.11-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:934832f6ed9b34a7d5feea58972635c2039c7a3b434fe5ba2ce015064cb6e955"}, - {file = "ruff-0.1.11-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea0d3e950e394c4b332bcdd112aa566010a9f9c95814844a7468325290aabfd9"}, - {file = "ruff-0.1.11-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9bd4025b9c5b429a48280785a2b71d479798a69f5c2919e7d274c5f4b32c3607"}, - {file = "ruff-0.1.11-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e1ad00662305dcb1e987f5ec214d31f7d6a062cae3e74c1cbccef15afd96611d"}, - {file = "ruff-0.1.11-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:4b077ce83f47dd6bea1991af08b140e8b8339f0ba8cb9b7a484c30ebab18a23f"}, - {file = "ruff-0.1.11-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4a88efecec23c37b11076fe676e15c6cdb1271a38f2b415e381e87fe4517f18"}, - {file = "ruff-0.1.11-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5b25093dad3b055667730a9b491129c42d45e11cdb7043b702e97125bcec48a1"}, - {file = "ruff-0.1.11-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:231d8fb11b2cc7c0366a326a66dafc6ad449d7fcdbc268497ee47e1334f66f77"}, - {file = "ruff-0.1.11-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:09c415716884950080921dd6237767e52e227e397e2008e2bed410117679975b"}, - {file = "ruff-0.1.11-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:0f58948c6d212a6b8d41cd59e349751018797ce1727f961c2fa755ad6208ba45"}, - {file = "ruff-0.1.11-py3-none-musllinux_1_2_i686.whl", hash = "sha256:190a566c8f766c37074d99640cd9ca3da11d8deae2deae7c9505e68a4a30f740"}, - {file = 
"ruff-0.1.11-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:6464289bd67b2344d2a5d9158d5eb81025258f169e69a46b741b396ffb0cda95"}, - {file = "ruff-0.1.11-py3-none-win32.whl", hash = "sha256:9b8f397902f92bc2e70fb6bebfa2139008dc72ae5177e66c383fa5426cb0bf2c"}, - {file = "ruff-0.1.11-py3-none-win_amd64.whl", hash = "sha256:eb85ee287b11f901037a6683b2374bb0ec82928c5cbc984f575d0437979c521a"}, - {file = "ruff-0.1.11-py3-none-win_arm64.whl", hash = "sha256:97ce4d752f964ba559c7023a86e5f8e97f026d511e48013987623915431c7ea9"}, - {file = "ruff-0.1.11.tar.gz", hash = "sha256:f9d4d88cb6eeb4dfe20f9f0519bd2eaba8119bde87c3d5065c541dbae2b5a2cb"}, + {file = "ruff-0.2.2-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:0a9efb032855ffb3c21f6405751d5e147b0c6b631e3ca3f6b20f917572b97eb6"}, + {file = "ruff-0.2.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:d450b7fbff85913f866a5384d8912710936e2b96da74541c82c1b458472ddb39"}, + {file = "ruff-0.2.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ecd46e3106850a5c26aee114e562c329f9a1fbe9e4821b008c4404f64ff9ce73"}, + {file = "ruff-0.2.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e22676a5b875bd72acd3d11d5fa9075d3a5f53b877fe7b4793e4673499318ba"}, + {file = "ruff-0.2.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1695700d1e25a99d28f7a1636d85bafcc5030bba9d0578c0781ba1790dbcf51c"}, + {file = "ruff-0.2.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:b0c232af3d0bd8f521806223723456ffebf8e323bd1e4e82b0befb20ba18388e"}, + {file = "ruff-0.2.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f63d96494eeec2fc70d909393bcd76c69f35334cdbd9e20d089fb3f0640216ca"}, + {file = "ruff-0.2.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6a61ea0ff048e06de273b2e45bd72629f470f5da8f71daf09fe481278b175001"}, + {file = 
"ruff-0.2.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e1439c8f407e4f356470e54cdecdca1bd5439a0673792dbe34a2b0a551a2fe3"}, + {file = "ruff-0.2.2-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:940de32dc8853eba0f67f7198b3e79bc6ba95c2edbfdfac2144c8235114d6726"}, + {file = "ruff-0.2.2-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:0c126da55c38dd917621552ab430213bdb3273bb10ddb67bc4b761989210eb6e"}, + {file = "ruff-0.2.2-py3-none-musllinux_1_2_i686.whl", hash = "sha256:3b65494f7e4bed2e74110dac1f0d17dc8e1f42faaa784e7c58a98e335ec83d7e"}, + {file = "ruff-0.2.2-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:1ec49be4fe6ddac0503833f3ed8930528e26d1e60ad35c2446da372d16651ce9"}, + {file = "ruff-0.2.2-py3-none-win32.whl", hash = "sha256:d920499b576f6c68295bc04e7b17b6544d9d05f196bb3aac4358792ef6f34325"}, + {file = "ruff-0.2.2-py3-none-win_amd64.whl", hash = "sha256:cc9a91ae137d687f43a44c900e5d95e9617cb37d4c989e462980ba27039d239d"}, + {file = "ruff-0.2.2-py3-none-win_arm64.whl", hash = "sha256:c9d15fc41e6054bfc7200478720570078f0b41c9ae4f010bcc16bd6f4d1aacdd"}, + {file = "ruff-0.2.2.tar.gz", hash = "sha256:e62ed7f36b3068a30ba39193a14274cd706bc486fad521276458022f7bccb31d"}, ] [[package]] @@ -1213,19 +1156,19 @@ gitlab = ["python-gitlab (>=1.3.0)"] [[package]] name = "setuptools" -version = "69.0.3" +version = "69.1.1" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "setuptools-69.0.3-py3-none-any.whl", hash = "sha256:385eb4edd9c9d5c17540511303e39a147ce2fc04bc55289c322b9e5904fe2c05"}, - {file = "setuptools-69.0.3.tar.gz", hash = "sha256:be1af57fc409f93647f2e8e4573a142ed38724b8cdd389706a867bb4efcf1e78"}, + {file = "setuptools-69.1.1-py3-none-any.whl", hash = "sha256:02fa291a0471b3a18b2b2481ed902af520c69e8ae0919c13da936542754b4c56"}, + {file = "setuptools-69.1.1.tar.gz", hash = 
"sha256:5c0806c7d9af348e6dd3777b4f4dbb42c7ad85b190104837488eab9a7c945cf8"}, ] [package.extras] docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] -testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] -testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.1)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] +testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.2)", "pip (>=19.1)", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff (>=0.2.1)", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.2)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] [[package]] name = "shellingham" @@ -1238,17 +1181,6 @@ files = [ {file = "shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de"}, ] -[[package]] -name = "smmap" 
-version = "5.0.1" -description = "A pure Python implementation of a sliding window memory map manager" -optional = false -python-versions = ">=3.7" -files = [ - {file = "smmap-5.0.1-py3-none-any.whl", hash = "sha256:e6d8668fa5f93e706934a62d7b4db19c8d9eb8cf2adbb75ef1b675aa332b69da"}, - {file = "smmap-5.0.1.tar.gz", hash = "sha256:dceeb6c0028fdb6734471eb07c0cd2aae706ccaecab45965ee83f11c8d3b1f62"}, -] - [[package]] name = "snowballstemmer" version = "2.2.0" @@ -1262,13 +1194,13 @@ files = [ [[package]] name = "stevedore" -version = "5.1.0" +version = "5.2.0" description = "Manage dynamic plugins for Python applications" optional = false python-versions = ">=3.8" files = [ - {file = "stevedore-5.1.0-py3-none-any.whl", hash = "sha256:8cc040628f3cea5d7128f2e76cf486b2251a4e543c7b938f58d9a377f6694a2d"}, - {file = "stevedore-5.1.0.tar.gz", hash = "sha256:a54534acf9b89bc7ed264807013b505bf07f74dbe4bcfa37d32bd063870b087c"}, + {file = "stevedore-5.2.0-py3-none-any.whl", hash = "sha256:1c15d95766ca0569cad14cb6272d4d31dae66b011a929d7c18219c176ea1b5c9"}, + {file = "stevedore-5.2.0.tar.gz", hash = "sha256:46b93ca40e1114cea93d738a6c1e365396981bb6bb78c27045b7587c9473544d"}, ] [package.dependencies] @@ -1336,29 +1268,30 @@ files = [ [[package]] name = "urllib3" -version = "2.1.0" +version = "2.2.1" description = "HTTP library with thread-safe connection pooling, file post, and more." 
optional = false python-versions = ">=3.8" files = [ - {file = "urllib3-2.1.0-py3-none-any.whl", hash = "sha256:55901e917a5896a349ff771be919f8bd99aff50b79fe58fec595eb37bbc56bb3"}, - {file = "urllib3-2.1.0.tar.gz", hash = "sha256:df7aa8afb0148fa78488e7899b2c59b5f4ffcfa82e6c54ccb9dd37c1d7b52d54"}, + {file = "urllib3-2.2.1-py3-none-any.whl", hash = "sha256:450b20ec296a467077128bff42b73080516e71b56ff59a60a02bef2232c4fa9d"}, + {file = "urllib3-2.2.1.tar.gz", hash = "sha256:d0570876c61ab9e520d776c38acbbb5b05a776d3f9ff98a5c8fd5162a444cf19"}, ] [package.extras] brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] [[package]] name = "virtualenv" -version = "20.25.0" +version = "20.25.1" description = "Virtual Python Environment builder" optional = false python-versions = ">=3.7" files = [ - {file = "virtualenv-20.25.0-py3-none-any.whl", hash = "sha256:4238949c5ffe6876362d9c0180fc6c3a824a7b12b80604eeb8085f2ed7460de3"}, - {file = "virtualenv-20.25.0.tar.gz", hash = "sha256:bf51c0d9c7dd63ea8e44086fa1e4fb1093a31e963b86959257378aef020e1f1b"}, + {file = "virtualenv-20.25.1-py3-none-any.whl", hash = "sha256:961c026ac520bac5f69acb8ea063e8a4f071bcc9457b9c1f28f6b085c511583a"}, + {file = "virtualenv-20.25.1.tar.gz", hash = "sha256:e08e13ecdca7a0bd53798f356d5831434afa5b07b93f0abdf0797b7a06ffe197"}, ] [package.dependencies] @@ -1373,4 +1306,4 @@ test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "1389751890bf2c1c64f410f1a3fc7b1d05c692c839b535e6220f74333ad18229" +content-hash = "2257cef332438dffd08c915ab1b6ca0c6c456d5ac513cfff94896039e09b61fa" diff --git a/stac_model/pyproject.toml b/stac_model/pyproject.toml index 59b02bc..043717b 100644 --- a/stac_model/pyproject.toml +++ b/stac_model/pyproject.toml @@ -73,7 +73,7 @@ pytest-sugar = "^0.9.7" pytest-click = "^1.1.0" pytest-pikachu = 
"^1.0.0" coverage = "^7.3.0" -ruff = "^0.1.7" +ruff = "^0.2.2" [tool.ruff] exclude = [ @@ -90,6 +90,21 @@ exclude = [ "venv" ] +[tool.ruff.lint] +select = [ + # pycodestyle + "E", + # Pyflakes + "F", + # pyupgrade + "UP", + # flake8-bugbear + "B", + # flake8-simplify + "SIM", + # isort + "I", +] [tool.mypy] # https://github.com/python/mypy diff --git a/stac_model/stac_model/__main__.py b/stac_model/stac_model/__main__.py index 6692014..324fdcc 100644 --- a/stac_model/stac_model/__main__.py +++ b/stac_model/stac_model/__main__.py @@ -2,12 +2,24 @@ from rich.console import Console from stac_model import __version__ -from stac_model.schema import InputArray, Statistics, ModelInput, Runtime, Asset, ResultArray, ModelOutput, ClassObject, MLModel - +from stac_model.schema import ( + Asset, + ClassObject, + InputArray, + MLModel, + ModelInput, + ModelOutput, + ResultArray, + Runtime, + Statistics, +) app = typer.Typer( name="stac-model", - help="A PydanticV2 validation and serialization library for the STAC Machine Learning Model Extension", + help=( + "A PydanticV2 validation and serialization library for the STAC Machine" + "Learning Model Extension" + ), add_completion=False, ) console = Console() @@ -36,33 +48,124 @@ def main( input_array = InputArray( shape=[-1, 13, 64, 64], dim_order="bchw", data_type="float32" ) - band_names = ["B01", "B02", "B03", "B04", "B05", "B06", "B07", "B08", "B8A", "B09", "B10", "B11", "B12"] - stats = Statistics(mean=[1354.40546513, 1118.24399958, 1042.92983953, 947.62620298, 1199.47283961, - 1999.79090914, 2369.22292565, 2296.82608323, 732.08340178, 12.11327804, - 1819.01027855, 1118.92391149, 2594.14080798], - stddev= [245.71762908, 333.00778264, 395.09249139, 593.75055589, 566.4170017, - 861.18399006, 1086.63139075, 1117.98170791, 404.91978886, 4.77584468, - 1002.58768311, 761.30323499, 1231.58581042]) - mlm_input = ModelInput(name= "13 Band Sentinel-2 Batch", bands=band_names, input_array=input_array, norm_by_channel=True, 
norm_type="z_score", rescale_type="none", statistics=stats, pre_processing_function = "https://github.com/microsoft/torchgeo/blob/545abe8326efc2848feae69d0212a15faba3eb00/torchgeo/datamodules/eurosat.py") - mlm_runtime = Runtime(framework= "torch", version= "2.1.2+cu121", asset= Asset(href= "https://huggingface.co/torchgeo/resnet18_sentinel2_all_moco/resolve/main/resnet18_sentinel2_all_moco-59bfdff9.pth"), - source_code= Asset(href="https://github.com/microsoft/torchgeo/blob/61efd2e2c4df7ebe3bd03002ebbaeaa3cfe9885a/torchgeo/models/resnet.py#L207"), accelerator="cuda", accelerator_constrained=False, hardware_summary="Unknown") - result_array = ResultArray(shape=[-1, 10], dim_names=["batch", "class"], data_type="float32") - class_map = {"Annual Crop": 0, - "Forest": 1, - "Herbaceous Vegetation": 2, - "Highway": 3, - "Industrial Buildings": 4, - "Pasture": 5, - "Permanent Crop": 6, - "Residential Buildings": 7, - "River": 8, - "SeaLake": 9} - class_objects = [ClassObject(value=class_map[class_name], name=class_name) for class_name in class_map] - mlm_output = ModelOutput(task= "classification", classification_classes=class_objects, output_shape=[-1, 10], result_array=[result_array]) - ml_model_meta = MLModel(mlm_name="Resnet-18 Sentinel-2 ALL MOCO", mlm_task="classification", mlm_framework = 'pytorch', mlm_framework_version="2.1.2+cu121", mlm_file_size=1, mlm_memory_size=1, mlm_summary= "Sourced from torchgeo python library, identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO", mlm_pretrained_source="EuroSat Sentinel-2", mlm_total_parameters= 11_700_000, mlm_input=[mlm_input], mlm_runtime=[mlm_runtime], mlm_output=[mlm_output]) + band_names = [ + "B01", + "B02", + "B03", + "B04", + "B05", + "B06", + "B07", + "B08", + "B8A", + "B09", + "B10", + "B11", + "B12", + ] + stats = Statistics( + mean=[ + 1354.40546513, + 1118.24399958, + 1042.92983953, + 947.62620298, + 1199.47283961, + 1999.79090914, + 2369.22292565, + 2296.82608323, + 732.08340178, + 12.11327804, + 
1819.01027855, + 1118.92391149, + 2594.14080798, + ], + stddev=[ + 245.71762908, + 333.00778264, + 395.09249139, + 593.75055589, + 566.4170017, + 861.18399006, + 1086.63139075, + 1117.98170791, + 404.91978886, + 4.77584468, + 1002.58768311, + 761.30323499, + 1231.58581042, + ], + ) + mlm_input = ModelInput( + name="13 Band Sentinel-2 Batch", + bands=band_names, + input_array=input_array, + norm_by_channel=True, + norm_type="z_score", + rescale_type="none", + statistics=stats, + pre_processing_function = "https://github.com/microsoft/torchgeo/blob/545abe8326efc2848feae69d0212a15faba3eb00/torchgeo/datamodules/eurosat.py" # noqa: E501 +, + ) + mlm_runtime = Runtime( + framework="torch", + version="2.1.2+cu121", + asset=Asset( + href="https://huggingface.co/torchgeo/resnet18_sentinel2_all_moco/resolve/main/resnet18_sentinel2_all_moco-59bfdff9.pth" # noqa: E501 + ), + source_code=Asset( + href="https://github.com/microsoft/torchgeo/blob/61efd2e2c4df7ebe3bd03002ebbaeaa3cfe9885a/torchgeo/models/resnet.py#L207" # noqa: E501 + ), + accelerator="cuda", + accelerator_constrained=False, + hardware_summary="Unknown", + ) + result_array = ResultArray( + shape=[-1, 10], dim_names=["batch", "class"], data_type="float32" + ) + class_map = { + "Annual Crop": 0, + "Forest": 1, + "Herbaceous Vegetation": 2, + "Highway": 3, + "Industrial Buildings": 4, + "Pasture": 5, + "Permanent Crop": 6, + "Residential Buildings": 7, + "River": 8, + "SeaLake": 9, + } + class_objects = [ + ClassObject(value=class_map[class_name], name=class_name) + for class_name in class_map + ] + mlm_output = ModelOutput( + task="classification", + classification_classes=class_objects, + output_shape=[-1, 10], + result_array=[result_array], + ) + ml_model_meta = MLModel( + mlm_name="Resnet-18 Sentinel-2 ALL MOCO", + mlm_task="classification", + mlm_framework="pytorch", + mlm_framework_version="2.1.2+cu121", + mlm_file_size=1, + mlm_memory_size=1, + mlm_summary=( + "Sourced from torchgeo python library," + 
"identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO" + ), + mlm_pretrained_source="EuroSat Sentinel-2", + mlm_total_parameters=11_700_000, + mlm_input=[mlm_input], + mlm_runtime=[mlm_runtime], + mlm_output=[mlm_output], + ) json_str = ml_model_meta.model_dump_json(indent=2, exclude_none=True) with open("example.json", "w") as file: file.write(json_str) print(ml_model_meta.model_dump_json(indent=2, exclude_none=True)) + + if __name__ == "__main__": app() diff --git a/stac_model/stac_model/input.py b/stac_model/stac_model/input.py index 2f72b7e..baf8df8 100644 --- a/stac_model/stac_model/input.py +++ b/stac_model/stac_model/input.py @@ -1,13 +1,16 @@ -from typing import List, Optional, Dict, Literal, Union -from pydantic import ( - BaseModel, - Field, - AnyUrl -) +from typing import Dict, List, Literal, Optional, Union + +from pydantic import AnyUrl, BaseModel, Field + + class InputArray(BaseModel): - shape: List[Union[int,float]] + shape: List[Union[int, float]] dim_order: Literal["bhw", "bchw", "bthw", "btchw"] - data_type: str = Field(..., pattern="^(uint8|uint16|uint32|uint64|int8|int16|int32|int64|float16|float32|float64)$") + data_type: str = Field( + ..., + pattern="^(uint8|uint16|uint32|uint64|int8|int16|int32|int64|float16|float32|float64)$", + ) + class Statistics(BaseModel): minimum: Optional[List[Union[float, int]]] = None @@ -17,6 +20,7 @@ class Statistics(BaseModel): count: Optional[List[int]] = None valid_percent: Optional[List[float]] = None + class Band(BaseModel): name: str description: Optional[str] = None @@ -24,13 +28,22 @@ class Band(BaseModel): data_type: str unit: Optional[str] = None + class ModelInput(BaseModel): name: str bands: List[str] input_array: InputArray parameters: Dict[str, Union[int, str, bool, List[Union[int, str, bool]]]] = None norm_by_channel: bool = None - norm_type: Literal["min_max", "z_score", "max_norm", "mean_norm", "unit_variance", "norm_with_clip", "none"] = None + norm_type: Literal[ + "min_max", + "z_score", + 
"max_norm", + "mean_norm", + "unit_variance", + "norm_with_clip", + "none", + ] = None resize_type: Literal["crop", "pad", "interpolation", "none"] = None statistics: Optional[Union[Statistics, List[Statistics]]] = None norm_with_clip_values: Optional[List[Union[float, int]]] = None diff --git a/stac_model/stac_model/output.py b/stac_model/stac_model/output.py index 9d099dc..01b7d0b 100644 --- a/stac_model/stac_model/output.py +++ b/stac_model/stac_model/output.py @@ -1,23 +1,31 @@ -from pydantic import BaseModel, Field -from typing import List, Union, Optional from enum import Enum +from typing import List, Optional, Union + +from pydantic import BaseModel, Field + + class TaskEnum(str, Enum): regression = "regression" classification = "classification" object_detection = "object detection" semantic_segmentation = "semantic segmentation" - instance_segmentation = "instance segmentation" + instance_segmentation = "instance segmentation" panoptic_segmentation = "panoptic segmentation" multi_modal = "multi-modal" similarity_search = "similarity search" image_captioning = "image captioning" - generative = "generative" + generative = "generative" super_resolution = "super resolution" + class ResultArray(BaseModel): - shape: List[Union[int,float]] + shape: List[Union[int, float]] dim_names: List[str] - data_type: str = Field(..., pattern="^(uint8|uint16|uint32|uint64|int8|int16|int32|int64|float16|float32|float64)$") + data_type: str = Field( + ..., + pattern="^(uint8|uint16|uint32|uint64|int8|int16|int32|int64|float16|float32|float64)$", + ) + class ClassObject(BaseModel): value: int @@ -26,6 +34,8 @@ class ClassObject(BaseModel): title: str = None color_hint: str = None nodata: bool = False + + class ModelOutput(BaseModel): task: TaskEnum result_array: List[ResultArray] = None diff --git a/stac_model/stac_model/paths.py b/stac_model/stac_model/paths.py index 95b5bc3..7c67400 100644 --- a/stac_model/stac_model/paths.py +++ b/stac_model/stac_model/paths.py @@ -1,8 +1,8 
@@ -from pydantic import ( - field_validator, - AnyUrl -) import re + +from pydantic import AnyUrl, field_validator + + class S3Path(AnyUrl): allowed_schemes = {"s3"} user_required = False @@ -29,7 +29,7 @@ def validate_bucket_name(cls, v): raise ValueError("Bucket name must be between 3 and 63 characters") if not re.match(r"^[a-z0-9.\-]+$", v): raise ValueError( - "Bucket name can only contain lowercase letters, numbers, dots, and hyphens" + "Bucket name can only contain lowercase, numbers, dots, and hyphens" ) if v.startswith("-") or v.endswith("-"): raise ValueError("Bucket name cannot start or end with a hyphen") diff --git a/stac_model/stac_model/runtime.py b/stac_model/stac_model/runtime.py index 20043ad..11ac146 100644 --- a/stac_model/stac_model/runtime.py +++ b/stac_model/stac_model/runtime.py @@ -1,10 +1,14 @@ -from .paths import S3Path -from pydantic import BaseModel, FilePath, field_validator -from typing import Optional, List from enum import Enum +from typing import List, Optional + +from pydantic import BaseModel, FilePath, field_validator + +from .paths import S3Path + + class Asset(BaseModel): - """Information about the model location and other additional file locations. Follows - the Asset Object spec: https://github.com/radiantearth/stac-spec/blob/master/collection-spec/collection-spec.md#asset-object + """Information about the model location and other additional file locations. + Follows the STAC Asset Object spec. 
""" href: S3Path | FilePath | str @@ -13,7 +17,6 @@ class Asset(BaseModel): type: Optional[str] = None roles: Optional[List[str]] = None - class Config: arbitrary_types_allowed = True @@ -21,16 +24,14 @@ class Config: @classmethod def check_path_type(cls, v): if isinstance(v, str): - if v.startswith("s3://"): - v = S3Path(url=v) - else: - v = FilePath(f=v) + v = S3Path(url=v) if v.startswith("s3://") else FilePath(f=v) else: raise ValueError( f"Expected str, S3Path, or FilePath input, received {type(v).__name__}" ) return v + class Container(BaseModel): container_file: str image_name: str @@ -39,6 +40,7 @@ class Container(BaseModel): run: str accelerator: bool + class AcceleratorEnum(str, Enum): amd64 = "amd64" cuda = "cuda" @@ -51,6 +53,7 @@ class AcceleratorEnum(str, Enum): def __str__(self): return self.value + class Runtime(BaseModel): asset: Asset source_code: Asset diff --git a/stac_model/stac_model/schema.py b/stac_model/stac_model/schema.py index 2b053aa..f2cfb2e 100644 --- a/stac_model/stac_model/schema.py +++ b/stac_model/stac_model/schema.py @@ -1,8 +1,11 @@ +from typing import Dict, List, Union + from pydantic import BaseModel -from .input import ModelInput, InputArray, Band, Statistics -from .output import ModelOutput, ClassObject, ResultArray, TaskEnum -from .runtime import Runtime, Asset, Container -from typing import List, Dict, Union + +from .input import Band, InputArray, ModelInput, Statistics +from .output import ClassObject, ModelOutput, ResultArray, TaskEnum +from .runtime import Asset, Container, Runtime + class MLModel(BaseModel): mlm_name: str @@ -19,4 +22,18 @@ class MLModel(BaseModel): mlm_summary: str mlm_parameters: Dict[str, Union[int, str, bool, List[Union[int, str, bool]]]] = None -__all__ = ["MLModel", "ModelInput", "InputArray", "Band", "Statistics", "ModelOutput", "ClassObject", "Asset", "ResultArray", "Runtime", "Container", "Asset"] + +__all__ = [ + "MLModel", + "ModelInput", + "InputArray", + "Band", + "Statistics", + 
"ModelOutput", + "ClassObject", + "Asset", + "ResultArray", + "Runtime", + "Container", + "Asset", +] diff --git a/stac_model/tests/test_schema.py b/stac_model/tests/test_schema.py index b23bfdc..4987c98 100644 --- a/stac_model/tests/test_schema.py +++ b/stac_model/tests/test_schema.py @@ -1,13 +1,15 @@ +import os +import tempfile + import pytest + from stac_model.schema import ( - TensorSignature, - ModelSignature, - ModelArtifact, ClassMap, + ModelArtifact, ModelMetadata, + ModelSignature, + TensorSignature, ) -import os -import tempfile def create_metadata(): @@ -56,7 +58,7 @@ def test_model_metadata_json_operations(metadata_json): file.write(metadata_json) # Read and validate the model metadata from the JSON file - with open(temp_filepath, "r") as json_file: + with open(temp_filepath) as json_file: json_str = json_file.read() model_metadata = ModelMetadata.model_validate_json(json_str) From b8efda07058c1954065a454c2013939a176fa718 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Fri, 23 Feb 2024 11:53:39 -0800 Subject: [PATCH 038/112] address metadata and text comments --- stac_model/pyproject.toml | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/stac_model/pyproject.toml b/stac_model/pyproject.toml index 043717b..f206765 100644 --- a/stac_model/pyproject.toml +++ b/stac_model/pyproject.toml @@ -24,15 +24,24 @@ keywords = [] # UPDATEME with relevant keywords # Pypi classifiers: https://pypi.org/classifiers/ -classifiers = [ # UPDATEME with additional classifiers; remove last classifier to allow publishing on PyPI +classifiers = [ "Development Status :: 3 - Alpha", - "Intended Audience :: Developers", "Operating System :: OS Independent", "Topic :: Software Development :: Libraries :: Python Modules", "License :: OSI Approved :: Apache Software License", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", + "Framework :: Pydantic", + "Framework :: Pydantic :: 
2", + "Intended Audience :: Developers", + "Intended Audience :: Information Technology", + "Intended Audience :: Science/Research", + "Topic :: File Formats :: JSON :: JSON Schema", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + "Topic :: Scientific/Engineering :: GIS", + "Topic :: Scientific/Engineering :: Image Processing", + "Topic :: Scientific/Engineering :: Image Recognition", ] From fb1de05e8d0498c49b5e342583e7d21ca50e1981 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Fri, 23 Feb 2024 11:59:10 -0800 Subject: [PATCH 039/112] remove old model_metadata.py --- stac_model/model_metadata.py | 131 ----------------------------------- 1 file changed, 131 deletions(-) delete mode 100644 stac_model/model_metadata.py diff --git a/stac_model/model_metadata.py b/stac_model/model_metadata.py deleted file mode 100644 index 9e0c502..0000000 --- a/stac_model/model_metadata.py +++ /dev/null @@ -1,131 +0,0 @@ -from typing import Any, Dict, List, Literal, Optional, Tuple -from uuid import uuid4 - -from pydantic import AnyUrl, BaseModel, Field, FilePath - - -# Pydantic Models -class TensorSignature(BaseModel): - name: Optional[str] = None - dtype: Any = Field(...) - shape: Tuple[int, ...] | List[int] = Field(...) 
- - -class ModelSignature(BaseModel): - inputs: List[TensorSignature] - outputs: List[TensorSignature] - params: Optional[Dict[str, int | float | str]] = None - - class Config: - arbitrary_types_allowed = True - - -class RuntimeConfig(BaseModel): - environment: str - - -class S3Path(AnyUrl): - allowed_schemes = {"s3"} - user_required = False - max_length = 1023 - min_length = 8 - - @classmethod - def validate_s3_url(cls, v): - if not v.startswith("s3://"): - raise ValueError("S3 path must start with s3://") - return v - - @classmethod - def validate_bucket_name(cls, v): - if not v: - raise ValueError("Bucket name cannot be empty") - return v - - @classmethod - def validate_key(cls, v): - if "//" in v: - raise ValueError("Key must not contain double slashes") - return v.strip("/") - - -class ModelArtifact(BaseModel): - path: S3Path | FilePath | str = Field(...) - additional_files: Optional[Dict[str, FilePath]] = None - - class Config: - arbitrary_types_allowed = True - - -class ClassMap(BaseModel): - class_to_label_id: Dict[str, int] - - @property - def label_id_to_class(self) -> Dict[int, str]: - return {v: k for k, v in self.class_to_label_id.items()} - - -class ModelMetadata(BaseModel): - signatures: ModelSignature - artifact: ModelArtifact - id: str = Field(default_factory=lambda: uuid4().hex) - class_map: ClassMap - runtime_config: Optional[RuntimeConfig] = None - name: str - ml_model_type: Optional[str] = None - ml_model_processor_type: Optional[Literal["cpu", "gpu", "tpu", "mps"]] = None - ml_model_learning_approach: Optional[str] = None - ml_model_prediction_type: Optional[ - Literal["object-detection", "classification", "segmentation", "regression"] - ] = None - ml_model_architecture: Optional[str] = None - - class Config: - arbitrary_types_allowed = True - - -# Functions to create, serialize, and deserialize ModelMetadata -def create_metadata(): - input_sig = TensorSignature( - name="input_tensor", dtype="float32", shape=(-1, 13, 64, 64) - ) - output_sig = 
TensorSignature(name="output_tensor", dtype="float32", shape=(-1, 10)) - model_sig = ModelSignature(inputs=[input_sig], outputs=[output_sig]) - model_artifact = ModelArtifact(path="s3://example/s3/uri/model.pt") - class_map = ClassMap( - class_to_label_id={ - "Annual Crop": 0, - "Forest": 1, - "Herbaceous Vegetation": 2, - "Highway": 3, - "Industrial Buildings": 4, - "Pasture": 5, - "Permanent Crop": 6, - "Residential Buildings": 7, - "River": 8, - "SeaLake": 9, - } - ) - return ModelMetadata( - name="eurosat", - class_map=class_map, - signatures=model_sig, - artifact=model_artifact, - ml_model_processor_type="cpu", - ) - - -def metadata_json(metadata: ModelMetadata) -> str: - return metadata.model_dump_json(indent=2) - - -def model_metadata_json_operations(json_str: str) -> ModelMetadata: - return ModelMetadata.model_validate_json(json_str) - - -# Running the functions end-to-end -metadata = create_metadata() -json_str = metadata_json(metadata) -model_metadata = model_metadata_json_operations(json_str) - -print("Model Metadata Name:", model_metadata.name) From b2318ad47543db4428c0da4d908edbcdf4bec343 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Fri, 23 Feb 2024 12:30:23 -0800 Subject: [PATCH 040/112] correct mlm: prefix --- README.md | 4 ++-- best-practices.md | 4 ++++ stac_model/example.json | 24 ++++++++++++------------ stac_model/stac_model/__main__.py | 4 ++-- stac_model/stac_model/schema.py | 10 ++++++++-- 5 files changed, 28 insertions(+), 18 deletions(-) diff --git a/README.md b/README.md index 11cf2c3..71d291d 100644 --- a/README.md +++ b/README.md @@ -29,9 +29,9 @@ Specifically, this extension records the following information to make ML models The MLM specification is biased towards providing metadata fields for supervised machine learning models. However, fields that relate to supervised ML are optional and users can use the fields they need for different tasks. 
-See [Best Practices](./best-practices.md) for guidance on what other extensions to use for documenting models with this extension. +See [Best Practices](./best-practices.md) for guidance on what other STAC extensions you should use in conjunction with this extension. The Machine Learning Model Extension purposely omits and delegates some definitions to other STAC extensions to favor reusability and avoid metadata duplication whenever possible. A properly defined MLM STAC Item/Collection should almost never have the Machine Learning Model Extension exclusively in `stac_extensions`. -Check the original technical report for an earlier version of the Model Extension [here](https://github.com/crim-ca/CCCOT03/raw/main/CCCOT03_Rapport%20Final_FINAL_EN.pdf) for more details. +Check the original technical report for an earlier version of the Model Extension, formerly known as the Deep Learning Model Extension (DLM), [here](https://github.com/crim-ca/CCCOT03/raw/main/CCCOT03_Rapport%20Final_FINAL_EN.pdf) for more details. The DLM was renamed to the current MLM Extension and refactored to form a cohesive definition across all machine learning approaches, regardless of whether the approach constitutes a deep neural network or other statistical approach. ![Image Description](https://i.imgur.com/cVAg5sA.png) diff --git a/best-practices.md b/best-practices.md index 858a49d..1d24ff8 100644 --- a/best-practices.md +++ b/best-practices.md @@ -19,4 +19,8 @@ For example: } ``` +STAC Items or STAC Assets with asset properties resulting from the model inference should be annotated with [`processing:level = L4`](https://github.com/stac-extensions/processing?tab=readme-ov-file#suggested-processing-levels). + +> Model output or results from analyses of lower level data (i.e.,variables that are not directly measured by the instruments, but are derived from these measurements) + TODO provide other suggestions on extensions to compose with this one. STAC ML AOI, STAC Label, ... 
diff --git a/stac_model/example.json b/stac_model/example.json index 2a71978..ef32350 100644 --- a/stac_model/example.json +++ b/stac_model/example.json @@ -1,11 +1,11 @@ { - "mlm_name": "Resnet-18 Sentinel-2 ALL MOCO", - "mlm_task": "classification", - "mlm_framework": "pytorch", - "mlm_framework_version": "2.1.2+cu121", - "mlm_file_size": 1, - "mlm_memory_size": 1, - "mlm_input": [ + "mlm:name": "Resnet-18 Sentinel-2 ALL MOCO", + "mlm:task": "classification", + "mlm:framework": "pytorch", + "mlm:framework_version": "2.1.2+cu121", + "mlm:file_size": 1, + "mlm:memory_size": 1, + "mlm:input": [ { "name": "13 Band Sentinel-2 Batch", "bands": [ @@ -70,7 +70,7 @@ "pre_processing_function": "https://github.com/microsoft/torchgeo/blob/545abe8326efc2848feae69d0212a15faba3eb00/torchgeo/datamodules/eurosat.py" } ], - "mlm_output": [ + "mlm:output": [ { "task": "classification", "result_array": [ @@ -140,7 +140,7 @@ ] } ], - "mlm_runtime": [ + "mlm:runtime": [ { "asset": { "href": "." @@ -153,7 +153,7 @@ "hardware_summary": "Unknown" } ], - "mlm_total_parameters": 11700000, - "mlm_pretrained_source": "EuroSat Sentinel-2", - "mlm_summary": "Sourced from torchgeo python library, identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO" + "mlm:total_parameters": 11700000, + "mlm:pretrained_source": "EuroSat Sentinel-2", + "mlm:summary": "Sourced from torchgeo python library,identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO" } diff --git a/stac_model/stac_model/__main__.py b/stac_model/stac_model/__main__.py index 324fdcc..5034250 100644 --- a/stac_model/stac_model/__main__.py +++ b/stac_model/stac_model/__main__.py @@ -161,10 +161,10 @@ def main( mlm_runtime=[mlm_runtime], mlm_output=[mlm_output], ) - json_str = ml_model_meta.model_dump_json(indent=2, exclude_none=True) + json_str = ml_model_meta.model_dump_json(indent=2, exclude_none=True, by_alias=True) with open("example.json", "w") as file: file.write(json_str) - print(ml_model_meta.model_dump_json(indent=2, exclude_none=True)) + 
print(ml_model_meta.model_dump_json(indent=2, exclude_none=True, by_alias=True)) if __name__ == "__main__": diff --git a/stac_model/stac_model/schema.py b/stac_model/stac_model/schema.py index f2cfb2e..3df0926 100644 --- a/stac_model/stac_model/schema.py +++ b/stac_model/stac_model/schema.py @@ -1,12 +1,15 @@ from typing import Dict, List, Union -from pydantic import BaseModel +from pydantic import BaseModel, ConfigDict from .input import Band, InputArray, ModelInput, Statistics from .output import ClassObject, ModelOutput, ResultArray, TaskEnum from .runtime import Asset, Container, Runtime +def mlm_prefix_replacer(field_name: str) -> str: + return field_name.replace("mlm_", "mlm:") + class MLModel(BaseModel): mlm_name: str mlm_task: TaskEnum @@ -20,7 +23,10 @@ class MLModel(BaseModel): mlm_total_parameters: int mlm_pretrained_source: str mlm_summary: str - mlm_parameters: Dict[str, Union[int, str, bool, List[Union[int, str, bool]]]] = None + mlm_parameters: Dict[str, Union[int, str, bool, List[Union[int, str, bool]]]] = None # noqa: E501 + + model_config = ConfigDict(alias_generator=mlm_prefix_replacer, + populate_by_name=True, extra="ignore") __all__ = [ From 43a744006cda24099c2173c5bab83819797812a9 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Fri, 23 Feb 2024 14:19:38 -0800 Subject: [PATCH 041/112] remove extra column --- stac_model/poetry.lock | 106 ++++++++++++++++++++--------------------- 1 file changed, 53 insertions(+), 53 deletions(-) diff --git a/stac_model/poetry.lock b/stac_model/poetry.lock index 15f9d0f..9c919a1 100644 --- a/stac_model/poetry.lock +++ b/stac_model/poetry.lock @@ -182,63 +182,63 @@ files = [ [[package]] name = "coverage" -version = "7.4.2" +version = "7.4.3" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.8" files = [ - {file = "coverage-7.4.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bf54c3e089179d9d23900e3efc86d46e4431188d9a657f345410eecdd0151f50"}, - {file = 
"coverage-7.4.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fe6e43c8b510719b48af7db9631b5fbac910ade4bd90e6378c85ac5ac706382c"}, - {file = "coverage-7.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b98c89db1b150d851a7840142d60d01d07677a18f0f46836e691c38134ed18b"}, - {file = "coverage-7.4.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c5f9683be6a5b19cd776ee4e2f2ffb411424819c69afab6b2db3a0a364ec6642"}, - {file = "coverage-7.4.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78cdcbf7b9cb83fe047ee09298e25b1cd1636824067166dc97ad0543b079d22f"}, - {file = "coverage-7.4.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:2599972b21911111114100d362aea9e70a88b258400672626efa2b9e2179609c"}, - {file = "coverage-7.4.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ef00d31b7569ed3cb2036f26565f1984b9fc08541731ce01012b02a4c238bf03"}, - {file = "coverage-7.4.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:20a875bfd8c282985c4720c32aa05056f77a68e6d8bbc5fe8632c5860ee0b49b"}, - {file = "coverage-7.4.2-cp310-cp310-win32.whl", hash = "sha256:b3f2b1eb229f23c82898eedfc3296137cf1f16bb145ceab3edfd17cbde273fb7"}, - {file = "coverage-7.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:7df95fdd1432a5d2675ce630fef5f239939e2b3610fe2f2b5bf21fa505256fa3"}, - {file = "coverage-7.4.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a8ddbd158e069dded57738ea69b9744525181e99974c899b39f75b2b29a624e2"}, - {file = "coverage-7.4.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:81a5fb41b0d24447a47543b749adc34d45a2cf77b48ca74e5bf3de60a7bd9edc"}, - {file = "coverage-7.4.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2412e98e70f16243be41d20836abd5f3f32edef07cbf8f407f1b6e1ceae783ac"}, - {file = 
"coverage-7.4.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ddb79414c15c6f03f56cc68fa06994f047cf20207c31b5dad3f6bab54a0f66ef"}, - {file = "coverage-7.4.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf89ab85027427d351f1de918aff4b43f4eb5f33aff6835ed30322a86ac29c9e"}, - {file = "coverage-7.4.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a178b7b1ac0f1530bb28d2e51f88c0bab3e5949835851a60dda80bff6052510c"}, - {file = "coverage-7.4.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:06fe398145a2e91edaf1ab4eee66149c6776c6b25b136f4a86fcbbb09512fd10"}, - {file = "coverage-7.4.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:18cac867950943fe93d6cd56a67eb7dcd2d4a781a40f4c1e25d6f1ed98721a55"}, - {file = "coverage-7.4.2-cp311-cp311-win32.whl", hash = "sha256:f72cdd2586f9a769570d4b5714a3837b3a59a53b096bb954f1811f6a0afad305"}, - {file = "coverage-7.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:d779a48fac416387dd5673fc5b2d6bd903ed903faaa3247dc1865c65eaa5a93e"}, - {file = "coverage-7.4.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:adbdfcda2469d188d79771d5696dc54fab98a16d2ef7e0875013b5f56a251047"}, - {file = "coverage-7.4.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ac4bab32f396b03ebecfcf2971668da9275b3bb5f81b3b6ba96622f4ef3f6e17"}, - {file = "coverage-7.4.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:006d220ba2e1a45f1de083d5022d4955abb0aedd78904cd5a779b955b019ec73"}, - {file = "coverage-7.4.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3733545eb294e5ad274abe131d1e7e7de4ba17a144505c12feca48803fea5f64"}, - {file = "coverage-7.4.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42a9e754aa250fe61f0f99986399cec086d7e7a01dd82fd863a20af34cbce962"}, - {file = 
"coverage-7.4.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:2ed37e16cf35c8d6e0b430254574b8edd242a367a1b1531bd1adc99c6a5e00fe"}, - {file = "coverage-7.4.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:b953275d4edfab6cc0ed7139fa773dfb89e81fee1569a932f6020ce7c6da0e8f"}, - {file = "coverage-7.4.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:32b4ab7e6c924f945cbae5392832e93e4ceb81483fd6dc4aa8fb1a97b9d3e0e1"}, - {file = "coverage-7.4.2-cp312-cp312-win32.whl", hash = "sha256:f5df76c58977bc35a49515b2fbba84a1d952ff0ec784a4070334dfbec28a2def"}, - {file = "coverage-7.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:34423abbaad70fea9d0164add189eabaea679068ebdf693baa5c02d03e7db244"}, - {file = "coverage-7.4.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5b11f9c6587668e495cc7365f85c93bed34c3a81f9f08b0920b87a89acc13469"}, - {file = "coverage-7.4.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:51593a1f05c39332f623d64d910445fdec3d2ac2d96b37ce7f331882d5678ddf"}, - {file = "coverage-7.4.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69f1665165ba2fe7614e2f0c1aed71e14d83510bf67e2ee13df467d1c08bf1e8"}, - {file = "coverage-7.4.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3c8bbb95a699c80a167478478efe5e09ad31680931ec280bf2087905e3b95ec"}, - {file = "coverage-7.4.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:175f56572f25e1e1201d2b3e07b71ca4d201bf0b9cb8fad3f1dfae6a4188de86"}, - {file = "coverage-7.4.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8562ca91e8c40864942615b1d0b12289d3e745e6b2da901d133f52f2d510a1e3"}, - {file = "coverage-7.4.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d9a1ef0f173e1a19738f154fb3644f90d0ada56fe6c9b422f992b04266c55d5a"}, - {file = "coverage-7.4.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = 
"sha256:f40ac873045db4fd98a6f40387d242bde2708a3f8167bd967ccd43ad46394ba2"}, - {file = "coverage-7.4.2-cp38-cp38-win32.whl", hash = "sha256:d1b750a8409bec61caa7824bfd64a8074b6d2d420433f64c161a8335796c7c6b"}, - {file = "coverage-7.4.2-cp38-cp38-win_amd64.whl", hash = "sha256:b4ae777bebaed89e3a7e80c4a03fac434a98a8abb5251b2a957d38fe3fd30088"}, - {file = "coverage-7.4.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3ff7f92ae5a456101ca8f48387fd3c56eb96353588e686286f50633a611afc95"}, - {file = "coverage-7.4.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:861d75402269ffda0b33af94694b8e0703563116b04c681b1832903fac8fd647"}, - {file = "coverage-7.4.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3507427d83fa961cbd73f11140f4a5ce84208d31756f7238d6257b2d3d868405"}, - {file = "coverage-7.4.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bf711d517e21fb5bc429f5c4308fbc430a8585ff2a43e88540264ae87871e36a"}, - {file = "coverage-7.4.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c00e54f0bd258ab25e7f731ca1d5144b0bf7bec0051abccd2bdcff65fa3262c9"}, - {file = "coverage-7.4.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f8e845d894e39fb53834da826078f6dc1a933b32b1478cf437007367efaf6f6a"}, - {file = "coverage-7.4.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:840456cb1067dc350af9080298c7c2cfdddcedc1cb1e0b30dceecdaf7be1a2d3"}, - {file = "coverage-7.4.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c11ca2df2206a4e3e4c4567f52594637392ed05d7c7fb73b4ea1c658ba560265"}, - {file = "coverage-7.4.2-cp39-cp39-win32.whl", hash = "sha256:3ff5bdb08d8938d336ce4088ca1a1e4b6c8cd3bef8bb3a4c0eb2f37406e49643"}, - {file = "coverage-7.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:ac9e95cefcf044c98d4e2c829cd0669918585755dd9a92e28a1a7012322d0a95"}, - {file = "coverage-7.4.2-pp38.pp39.pp310-none-any.whl", hash = 
"sha256:f593a4a90118d99014517c2679e04a4ef5aee2d81aa05c26c734d271065efcb6"}, - {file = "coverage-7.4.2.tar.gz", hash = "sha256:1a5ee18e3a8d766075ce9314ed1cb695414bae67df6a4b0805f5137d93d6f1cb"}, + {file = "coverage-7.4.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8580b827d4746d47294c0e0b92854c85a92c2227927433998f0d3320ae8a71b6"}, + {file = "coverage-7.4.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:718187eeb9849fc6cc23e0d9b092bc2348821c5e1a901c9f8975df0bc785bfd4"}, + {file = "coverage-7.4.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:767b35c3a246bcb55b8044fd3a43b8cd553dd1f9f2c1eeb87a302b1f8daa0524"}, + {file = "coverage-7.4.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae7f19afe0cce50039e2c782bff379c7e347cba335429678450b8fe81c4ef96d"}, + {file = "coverage-7.4.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba3a8aaed13770e970b3df46980cb068d1c24af1a1968b7818b69af8c4347efb"}, + {file = "coverage-7.4.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:ee866acc0861caebb4f2ab79f0b94dbfbdbfadc19f82e6e9c93930f74e11d7a0"}, + {file = "coverage-7.4.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:506edb1dd49e13a2d4cac6a5173317b82a23c9d6e8df63efb4f0380de0fbccbc"}, + {file = "coverage-7.4.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd6545d97c98a192c5ac995d21c894b581f1fd14cf389be90724d21808b657e2"}, + {file = "coverage-7.4.3-cp310-cp310-win32.whl", hash = "sha256:f6a09b360d67e589236a44f0c39218a8efba2593b6abdccc300a8862cffc2f94"}, + {file = "coverage-7.4.3-cp310-cp310-win_amd64.whl", hash = "sha256:18d90523ce7553dd0b7e23cbb28865db23cddfd683a38fb224115f7826de78d0"}, + {file = "coverage-7.4.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cbbe5e739d45a52f3200a771c6d2c7acf89eb2524890a4a3aa1a7fa0695d2a47"}, + {file = "coverage-7.4.3-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:489763b2d037b164846ebac0cbd368b8a4ca56385c4090807ff9fad817de4113"}, + {file = "coverage-7.4.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:451f433ad901b3bb00184d83fd83d135fb682d780b38af7944c9faeecb1e0bfe"}, + {file = "coverage-7.4.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fcc66e222cf4c719fe7722a403888b1f5e1682d1679bd780e2b26c18bb648cdc"}, + {file = "coverage-7.4.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3ec74cfef2d985e145baae90d9b1b32f85e1741b04cd967aaf9cfa84c1334f3"}, + {file = "coverage-7.4.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:abbbd8093c5229c72d4c2926afaee0e6e3140de69d5dcd918b2921f2f0c8baba"}, + {file = "coverage-7.4.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:35eb581efdacf7b7422af677b92170da4ef34500467381e805944a3201df2079"}, + {file = "coverage-7.4.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8249b1c7334be8f8c3abcaaa996e1e4927b0e5a23b65f5bf6cfe3180d8ca7840"}, + {file = "coverage-7.4.3-cp311-cp311-win32.whl", hash = "sha256:cf30900aa1ba595312ae41978b95e256e419d8a823af79ce670835409fc02ad3"}, + {file = "coverage-7.4.3-cp311-cp311-win_amd64.whl", hash = "sha256:18c7320695c949de11a351742ee001849912fd57e62a706d83dfc1581897fa2e"}, + {file = "coverage-7.4.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b51bfc348925e92a9bd9b2e48dad13431b57011fd1038f08316e6bf1df107d10"}, + {file = "coverage-7.4.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d6cdecaedea1ea9e033d8adf6a0ab11107b49571bbb9737175444cea6eb72328"}, + {file = "coverage-7.4.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b2eccb883368f9e972e216c7b4c7c06cabda925b5f06dde0650281cb7666a30"}, + {file = "coverage-7.4.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:6c00cdc8fa4e50e1cc1f941a7f2e3e0f26cb2a1233c9696f26963ff58445bac7"}, + {file = "coverage-7.4.3-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9a4a8dd3dcf4cbd3165737358e4d7dfbd9d59902ad11e3b15eebb6393b0446e"}, + {file = "coverage-7.4.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:062b0a75d9261e2f9c6d071753f7eef0fc9caf3a2c82d36d76667ba7b6470003"}, + {file = "coverage-7.4.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:ebe7c9e67a2d15fa97b77ea6571ce5e1e1f6b0db71d1d5e96f8d2bf134303c1d"}, + {file = "coverage-7.4.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c0a120238dd71c68484f02562f6d446d736adcc6ca0993712289b102705a9a3a"}, + {file = "coverage-7.4.3-cp312-cp312-win32.whl", hash = "sha256:37389611ba54fd6d278fde86eb2c013c8e50232e38f5c68235d09d0a3f8aa352"}, + {file = "coverage-7.4.3-cp312-cp312-win_amd64.whl", hash = "sha256:d25b937a5d9ffa857d41be042b4238dd61db888533b53bc76dc082cb5a15e914"}, + {file = "coverage-7.4.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:28ca2098939eabab044ad68850aac8f8db6bf0b29bc7f2887d05889b17346454"}, + {file = "coverage-7.4.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:280459f0a03cecbe8800786cdc23067a8fc64c0bd51dc614008d9c36e1659d7e"}, + {file = "coverage-7.4.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c0cdedd3500e0511eac1517bf560149764b7d8e65cb800d8bf1c63ebf39edd2"}, + {file = "coverage-7.4.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9a9babb9466fe1da12417a4aed923e90124a534736de6201794a3aea9d98484e"}, + {file = "coverage-7.4.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dec9de46a33cf2dd87a5254af095a409ea3bf952d85ad339751e7de6d962cde6"}, + {file = "coverage-7.4.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:16bae383a9cc5abab9bb05c10a3e5a52e0a788325dc9ba8499e821885928968c"}, + {file = "coverage-7.4.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:2c854ce44e1ee31bda4e318af1dbcfc929026d12c5ed030095ad98197eeeaed0"}, + {file = "coverage-7.4.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ce8c50520f57ec57aa21a63ea4f325c7b657386b3f02ccaedeccf9ebe27686e1"}, + {file = "coverage-7.4.3-cp38-cp38-win32.whl", hash = "sha256:708a3369dcf055c00ddeeaa2b20f0dd1ce664eeabde6623e516c5228b753654f"}, + {file = "coverage-7.4.3-cp38-cp38-win_amd64.whl", hash = "sha256:1bf25fbca0c8d121a3e92a2a0555c7e5bc981aee5c3fdaf4bb7809f410f696b9"}, + {file = "coverage-7.4.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3b253094dbe1b431d3a4ac2f053b6d7ede2664ac559705a704f621742e034f1f"}, + {file = "coverage-7.4.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:77fbfc5720cceac9c200054b9fab50cb2a7d79660609200ab83f5db96162d20c"}, + {file = "coverage-7.4.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6679060424faa9c11808598504c3ab472de4531c571ab2befa32f4971835788e"}, + {file = "coverage-7.4.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4af154d617c875b52651dd8dd17a31270c495082f3d55f6128e7629658d63765"}, + {file = "coverage-7.4.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8640f1fde5e1b8e3439fe482cdc2b0bb6c329f4bb161927c28d2e8879c6029ee"}, + {file = "coverage-7.4.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:69b9f6f66c0af29642e73a520b6fed25ff9fd69a25975ebe6acb297234eda501"}, + {file = "coverage-7.4.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:0842571634f39016a6c03e9d4aba502be652a6e4455fadb73cd3a3a49173e38f"}, + {file = "coverage-7.4.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a78ed23b08e8ab524551f52953a8a05d61c3a760781762aac49f8de6eede8c45"}, + {file = "coverage-7.4.3-cp39-cp39-win32.whl", hash = 
"sha256:c0524de3ff096e15fcbfe8f056fdb4ea0bf497d584454f344d59fce069d3e6e9"}, + {file = "coverage-7.4.3-cp39-cp39-win_amd64.whl", hash = "sha256:0209a6369ccce576b43bb227dc8322d8ef9e323d089c6f3f26a597b09cb4d2aa"}, + {file = "coverage-7.4.3-pp38.pp39.pp310-none-any.whl", hash = "sha256:7cbde573904625509a3f37b6fecea974e363460b556a627c60dc2f47e2fffa51"}, + {file = "coverage-7.4.3.tar.gz", hash = "sha256:276f6077a5c61447a48d133ed13e759c09e62aff0dc84274a68dc18660104d52"}, ] [package.dependencies] From 7a25d922a46b8b28acca811529790f0d3b0c2d4e Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Fri, 23 Feb 2024 18:19:56 -0800 Subject: [PATCH 042/112] move stac_model up with gh actions, readme, templates --- stac_model/.dockerignore => .dockerignore | 0 stac_model/.editorconfig => .editorconfig | 0 {stac_model/.github => .github}/.stale.yml | 2 +- .../ISSUE_TEMPLATE/bug_report.md | 12 +- .../ISSUE_TEMPLATE/config.yml | 0 .../ISSUE_TEMPLATE/feature_request.md | 0 .../ISSUE_TEMPLATE/question.md | 2 +- .../PULL_REQUEST_TEMPLATE.md | 1 - .../.github => .github}/dependabot.yml | 0 .../.github => .github}/release-drafter.yml | 0 .../.github => .github}/workflows/build.yml | 0 .../workflows/greetings.yml | 6 +- .../workflows/release-drafter.yml | 0 .gitignore | 1033 +++++++++++++++++ ...mit-config.yaml => .pre-commit-config.yaml | 0 ...TRIBUTING.md => CONTRIBUTING_STAC_MODEL.md | 2 +- stac_model/Makefile => Makefile | 0 README.md | 10 +- stac_model/README.md => STAC_MODEL_README.md | 0 {stac_model/docker => docker}/Dockerfile | 0 {stac_model/docker => docker}/README.md | 0 stac_model/poetry.lock => poetry.lock | 0 stac_model/pyproject.toml => pyproject.toml | 0 stac_model/.gitignore | 1032 ---------------- stac_model/AUTHORS.md | 9 - stac_model/CHANGELOG.md | 0 stac_model/LICENSE | 207 ---- stac_model/SECURITY.md | 29 - stac_model/{stac_model => }/__init__.py | 0 stac_model/{stac_model => }/__main__.py | 0 stac_model/docs/.gitkeep | 0 stac_model/example.json | 159 --- 
stac_model/{stac_model => }/input.py | 0 stac_model/{stac_model => }/output.py | 0 stac_model/{stac_model => }/paths.py | 0 stac_model/requirements.txt | 12 - stac_model/{stac_model => }/runtime.py | 0 stac_model/{stac_model => }/schema.py | 0 {stac_model/tests => tests}/test_schema.py | 0 39 files changed, 1047 insertions(+), 1469 deletions(-) rename stac_model/.dockerignore => .dockerignore (100%) rename stac_model/.editorconfig => .editorconfig (100%) rename {stac_model/.github => .github}/.stale.yml (96%) rename {stac_model/.github => .github}/ISSUE_TEMPLATE/bug_report.md (77%) rename {stac_model/.github => .github}/ISSUE_TEMPLATE/config.yml (100%) rename {stac_model/.github => .github}/ISSUE_TEMPLATE/feature_request.md (100%) rename {stac_model/.github => .github}/ISSUE_TEMPLATE/question.md (91%) rename {stac_model/.github => .github}/PULL_REQUEST_TEMPLATE.md (95%) rename {stac_model/.github => .github}/dependabot.yml (100%) rename {stac_model/.github => .github}/release-drafter.yml (100%) rename {stac_model/.github => .github}/workflows/build.yml (100%) rename {stac_model/.github => .github}/workflows/greetings.yml (51%) rename {stac_model/.github => .github}/workflows/release-drafter.yml (100%) rename stac_model/.pre-commit-config.yaml => .pre-commit-config.yaml (100%) rename stac_model/CONTRIBUTING.md => CONTRIBUTING_STAC_MODEL.md (98%) rename stac_model/Makefile => Makefile (100%) rename stac_model/README.md => STAC_MODEL_README.md (100%) rename {stac_model/docker => docker}/Dockerfile (100%) rename {stac_model/docker => docker}/README.md (100%) rename stac_model/poetry.lock => poetry.lock (100%) rename stac_model/pyproject.toml => pyproject.toml (100%) delete mode 100644 stac_model/.gitignore delete mode 100644 stac_model/AUTHORS.md delete mode 100644 stac_model/CHANGELOG.md delete mode 100644 stac_model/LICENSE delete mode 100644 stac_model/SECURITY.md rename stac_model/{stac_model => }/__init__.py (100%) rename stac_model/{stac_model => }/__main__.py 
(100%) delete mode 100644 stac_model/docs/.gitkeep delete mode 100644 stac_model/example.json rename stac_model/{stac_model => }/input.py (100%) rename stac_model/{stac_model => }/output.py (100%) rename stac_model/{stac_model => }/paths.py (100%) delete mode 100644 stac_model/requirements.txt rename stac_model/{stac_model => }/runtime.py (100%) rename stac_model/{stac_model => }/schema.py (100%) rename {stac_model/tests => tests}/test_schema.py (100%) diff --git a/stac_model/.dockerignore b/.dockerignore similarity index 100% rename from stac_model/.dockerignore rename to .dockerignore diff --git a/stac_model/.editorconfig b/.editorconfig similarity index 100% rename from stac_model/.editorconfig rename to .editorconfig diff --git a/stac_model/.github/.stale.yml b/.github/.stale.yml similarity index 96% rename from stac_model/.github/.stale.yml rename to .github/.stale.yml index 159f419..7b81464 100644 --- a/stac_model/.github/.stale.yml +++ b/.github/.stale.yml @@ -11,7 +11,7 @@ staleLabel: stale # Comment to post when marking an issue as stale. Set to `false` to disable markComment: > This issue has been automatically marked as stale because it has not had - recent activity. It will be closed if no further activity occurs. Thank you + recent activity. It will be closed if no further activity occurs in 30 days. Thank you for your contributions. # Comment to post when closing a stale issue. Set to `false` to disable closeComment: false diff --git a/stac_model/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md similarity index 77% rename from stac_model/.github/ISSUE_TEMPLATE/bug_report.md rename to .github/ISSUE_TEMPLATE/bug_report.md index 996ff26..236ee7b 100644 --- a/stac_model/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -12,7 +12,7 @@ assignees: ## :microscope: How To Reproduce -Steps to reproduce the behaviour: +Steps to reproduce the behavior: 1. ... 
@@ -23,15 +23,9 @@ Steps to reproduce the behaviour: ### Environment * OS: [e.g. Linux / Windows / macOS] -* Python version, get it with: +* Python version +* stac-model version -```bash -python --version -``` - -### Screenshots - - ## :chart_with_upwards_trend: Expected behavior diff --git a/stac_model/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml similarity index 100% rename from stac_model/.github/ISSUE_TEMPLATE/config.yml rename to .github/ISSUE_TEMPLATE/config.yml diff --git a/stac_model/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md similarity index 100% rename from stac_model/.github/ISSUE_TEMPLATE/feature_request.md rename to .github/ISSUE_TEMPLATE/feature_request.md diff --git a/stac_model/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md similarity index 91% rename from stac_model/.github/ISSUE_TEMPLATE/question.md rename to .github/ISSUE_TEMPLATE/question.md index 6ac7668..bf287fd 100644 --- a/stac_model/.github/ISSUE_TEMPLATE/question.md +++ b/.github/ISSUE_TEMPLATE/question.md @@ -25,4 +25,4 @@ Is it possible to [...]? 
-[1]: https://github.com/rbavery/stac-model/issues +[1]: https://github.com/crim-ca/stac-model/issues diff --git a/stac_model/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md similarity index 95% rename from stac_model/.github/PULL_REQUEST_TEMPLATE.md rename to .github/PULL_REQUEST_TEMPLATE.md index 5e3bd6d..c088256 100644 --- a/stac_model/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -21,7 +21,6 @@ -- [ ] I've read the [`CODE_OF_CONDUCT.md`][1] document; - [ ] I've read the [`CONTRIBUTING.md`][2] guide; - [ ] I've updated the code style using `make codestyle`; - [ ] I've written tests for all new methods and classes that I created; diff --git a/stac_model/.github/dependabot.yml b/.github/dependabot.yml similarity index 100% rename from stac_model/.github/dependabot.yml rename to .github/dependabot.yml diff --git a/stac_model/.github/release-drafter.yml b/.github/release-drafter.yml similarity index 100% rename from stac_model/.github/release-drafter.yml rename to .github/release-drafter.yml diff --git a/stac_model/.github/workflows/build.yml b/.github/workflows/build.yml similarity index 100% rename from stac_model/.github/workflows/build.yml rename to .github/workflows/build.yml diff --git a/stac_model/.github/workflows/greetings.yml b/.github/workflows/greetings.yml similarity index 51% rename from stac_model/.github/workflows/greetings.yml rename to .github/workflows/greetings.yml index a1f6e89..19f1ce0 100644 --- a/stac_model/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -9,8 +9,8 @@ jobs: - uses: actions/first-interaction@v1 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - pr-message: 'Hello @${{ github.actor }}, thank you for submitting a PR! We will respond as soon as possible.' + pr-message: 'Hello @${{ github.actor }}, thank you for submitting a PR!' issue-message: | - Hello @${{ github.actor }}, thank you for your interest in our work! 
+ Hello @${{ github.actor }}, thank you for submitting an issue! - If this is a bug report, please provide screenshots and **minimum viable code to reproduce your issue**, otherwise we can not help you. + If this is a bug report, please provide **minimum viable code to reproduce your issue**. diff --git a/stac_model/.github/workflows/release-drafter.yml b/.github/workflows/release-drafter.yml similarity index 100% rename from stac_model/.github/workflows/release-drafter.yml rename to .github/workflows/release-drafter.yml diff --git a/.gitignore b/.gitignore index 654b340..98270d4 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,1036 @@ /package-lock.json /node_modules .vscode + +### ArchLinuxPackages ### +*.tar +*.tar.* +*.jar +*.exe +*.msi +*.zip +*.tgz +*.log +*.log.* +*.sig + +pkg/ +src/ + +### C ### +# Prerequisites +*.d + +# Object files +*.o +*.ko +*.obj +*.elf + +# Linker output +*.ilk +*.map +*.exp + +# Precompiled Headers +*.gch +*.pch + +# Libraries +*.lib +*.a +*.la +*.lo + +# Shared objects (inc. Windows DLLs) +*.dll +*.so +*.so.* +*.dylib + +# Executables +*.out +*.app +*.i*86 +*.x86_64 +*.hex + +# Debug files +*.dSYM/ +*.su +*.idb +*.pdb + +# Kernel Module Compile Results +*.mod* +*.cmd +.tmp_versions/ +modules.order +Module.symvers +Mkfile.old +dkms.conf + +### certificates ### +*.pem +*.key +*.crt +*.cer +*.der +*.priv + +### Database ### +*.accdb +*.db +*.dbf +*.mdb +*.sqlite3 +*.db-shm +*.db-wal + +### Diff ### +*.patch +*.diff + +### Django ### +*.pot +*.pyc +__pycache__/ +local_settings.py +db.sqlite3 +db.sqlite3-journal +media + +# If your build process includes running collectstatic, then you probably don't need or want to include staticfiles/ +# in your Git repository. Update and uncomment the following line accordingly. 
+# /staticfiles/ + +### Django.Python Stack ### +# Byte-compiled / optimized / DLL files +*.py[cod] +*$py.class + +# C extensions + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo + +# Django stuff: + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. 
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ + +### Git ### +# Created by git for backups. 
To disable backups in Git: +# $ git config --global mergetool.keepBackup false +*.orig + +# Created by git when using merge tools for conflicts +*.BACKUP.* +*.BASE.* +*.LOCAL.* +*.REMOTE.* +*_BACKUP_*.txt +*_BASE_*.txt +*_LOCAL_*.txt +*_REMOTE_*.txt + +### Linux ### +*~ + +# temporary files which can be created if a process still has a handle open of a deleted file +.fuse_hidden* + +# KDE directory preferences +.directory + +# Linux trash folder which might appear on any partition or disk +.Trash-* + +# .nfs files are created when an open file is removed but is still being accessed +.nfs* + +### MicrosoftOffice ### +*.tmp + +# Word temporary +~$*.doc* + +# Word Auto Backup File +Backup of *.doc* + +# Excel temporary +~$*.xls* + +# Excel Backup File +*.xlk + +# PowerPoint temporary +~$*.ppt* + +# Visio autosave temporary files +*.~vsd* + +### OSX ### +# General +.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + +### PyCharm ### +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff +.idea/**/workspace.xml +.idea/**/tasks.xml +.idea/**/usage.statistics.xml +.idea/**/dictionaries +.idea/**/shelf + +# AWS User-specific +.idea/**/aws.xml + +# Generated files +.idea/**/contentModel.xml + +# Sensitive or high-churn files +.idea/**/dataSources/ +.idea/**/dataSources.ids +.idea/**/dataSources.local.xml +.idea/**/sqlDataSources.xml +.idea/**/dynamic.xml +.idea/**/uiDesigner.xml +.idea/**/dbnavigator.xml + +# Gradle +.idea/**/gradle.xml 
+.idea/**/libraries + +# Gradle and Maven with auto-import +# When using Gradle or Maven with auto-import, you should exclude module files, +# since they will be recreated, and may cause churn. Uncomment if using +# auto-import. +# .idea/artifacts +# .idea/compiler.xml +# .idea/jarRepositories.xml +# .idea/modules.xml +# .idea/*.iml +# .idea/modules +# *.iml +# *.ipr + +# CMake +cmake-build-*/ + +# Mongo Explorer plugin +.idea/**/mongoSettings.xml + +# File-based project format +*.iws + +# IntelliJ +out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Cursive Clojure plugin +.idea/replstate.xml + +# SonarLint plugin +.idea/sonarlint/ + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties + +# Editor-based Rest Client +.idea/httpRequests + +# Android studio 3.1+ serialized cache file +.idea/caches/build_file_checksums.ser + +### PyCharm Patch ### +# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721 + +# *.iml +# modules.xml +# .idea/misc.xml +# *.ipr + +# Sonarlint plugin +# https://plugins.jetbrains.com/plugin/7973-sonarlint +.idea/**/sonarlint/ + +# SonarQube Plugin +# https://plugins.jetbrains.com/plugin/7238-sonarqube-community-plugin +.idea/**/sonarIssues.xml + +# Markdown Navigator plugin +# https://plugins.jetbrains.com/plugin/7896-markdown-navigator-enhanced +.idea/**/markdown-navigator.xml +.idea/**/markdown-navigator-enh.xml +.idea/**/markdown-navigator/ + +# Cache file creation bug +# See https://youtrack.jetbrains.com/issue/JBR-2257 +.idea/$CACHE_FILE$ + +# CodeStream plugin +# https://plugins.jetbrains.com/plugin/12206-codestream +.idea/codestream.xml + +# Azure Toolkit for IntelliJ plugin +# https://plugins.jetbrains.com/plugin/8053-azure-toolkit-for-intellij +.idea/**/azureSettings.xml + +### Python ### +# Byte-compiled / optimized / DLL files + +# C 
extensions + +# Distribution / packaging + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. + +# Installer logs + +# Unit test / coverage reports + +# Translations + +# Django stuff: + +# Flask stuff: + +# Scrapy stuff: + +# Sphinx documentation + +# PyBuilder + +# Jupyter Notebook + +# IPython + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm + +# Celery stuff + +# SageMath parsed files + +# Environments + +# Spyder project settings + +# Rope project settings + +# mkdocs documentation + +# mypy + +# Pyre type checker + +# pytype static type analyzer + +# Cython debug symbols + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. + +### Python Patch ### +# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration +poetry.toml + +# ruff +.ruff_cache/ + +# LSP config files +pyrightconfig.json + +### Spreadsheet ### +*.xlr +*.xls +*.xlsx + +### SSH ### +**/.ssh/id_* +**/.ssh/*_id_* +**/.ssh/known_hosts + +### Vim ### +# Swap +[._]*.s[a-v][a-z] +!*.svg # comment out if you don't need vector files +[._]*.sw[a-p] +[._]s[a-rt-v][a-z] +[._]ss[a-gi-z] +[._]sw[a-p] + +# Session +Session.vim +Sessionx.vim + +# Temporary +.netrwhist +# Auto-generated tag files +tags +# Persistent undo +[._]*.un~ + +### VisualStudioCode ### +.vscode/* +!.vscode/settings.json +!.vscode/tasks.json +!.vscode/launch.json +!.vscode/extensions.json +!.vscode/*.code-snippets + +# Local History for Visual Studio Code +.history/ + +# Built Visual Studio Code Extensions +*.vsix + +### VisualStudioCode Patch ### +# Ignore all local history of files +.history +.ionide + +### Windows ### +# Windows thumbnail cache files +Thumbs.db +Thumbs.db:encryptable +ehthumbs.db +ehthumbs_vista.db + +# Dump file +*.stackdump + +# Folder config file +[Dd]esktop.ini + +# Recycle Bin used on file shares +$RECYCLE.BIN/ + +# Windows Installer files +*.cab +*.msix +*.msm +*.msp + +# Windows shortcuts +*.lnk + +### Zsh ### +# Zsh compiled script + 
zrecompile backup +*.zwc +*.zwc.old + +# Zsh completion-optimization dumpfile +*zcompdump* + +# Zsh history +.zsh_history + +# Zsh sessions +.zsh_sessions + +# Zsh zcalc history +.zcalc_history + +# A popular plugin manager's files +._zinit +.zinit_lstupd + +# zdharma/zshelldoc tool's files +zsdoc/data + +# robbyrussell/oh-my-zsh/plugins/per-directory-history plugin's files +# (when set-up to store the history in the local directory) +.directory_history + +# MichaelAquilina/zsh-autoswitch-virtualenv plugin's files +# (for Zsh plugins using Python) + +# Zunit tests' output +/tests/_output/* +!/tests/_output/.gitkeep + +### VisualStudio ### +## Ignore Visual Studio temporary files, build results, and +## files generated by popular Visual Studio add-ons. +## +## Get latest from https://github.com/github/gitignore/blob/main/VisualStudio.gitignore + +# User-specific files +*.rsuser +*.suo +*.user +*.userosscache +*.sln.docstates + +# User-specific files (MonoDevelop/Xamarin Studio) +*.userprefs + +# Mono auto generated files +mono_crash.* + +# Build results +[Dd]ebug/ +[Dd]ebugPublic/ +[Rr]elease/ +[Rr]eleases/ +x64/ +x86/ +[Ww][Ii][Nn]32/ +[Aa][Rr][Mm]/ +[Aa][Rr][Mm]64/ +bld/ +[Bb]in/ +[Oo]bj/ +[Ll]og/ +[Ll]ogs/ + +# Visual Studio 2015/2017 cache/options directory +.vs/ +# Uncomment if you have tasks that create the project's static files in wwwroot +#wwwroot/ + +# Visual Studio 2017 auto generated files +Generated\ Files/ + +# MSTest test Results +[Tt]est[Rr]esult*/ +[Bb]uild[Ll]og.* + +# NUnit +*.VisualState.xml +TestResult.xml +nunit-*.xml + +# Build Results of an ATL Project +[Dd]ebugPS/ +[Rr]eleasePS/ +dlldata.c + +# Benchmark Results +BenchmarkDotNet.Artifacts/ + +# .NET Core +project.lock.json +project.fragment.lock.json +artifacts/ + +# ASP.NET Scaffolding +ScaffoldingReadMe.txt + +# StyleCop +StyleCopReport.xml + +# Files built by Visual Studio +*_i.c +*_p.c +*_h.h +*.meta +*.iobj +*.ipdb +*.pgc +*.pgd +*.rsp +*.sbr +*.tlb +*.tli +*.tlh +*.tmp_proj 
+*_wpftmp.csproj +*.tlog +*.vspscc +*.vssscc +.builds +*.pidb +*.svclog +*.scc + +# Chutzpah Test files +_Chutzpah* + +# Visual C++ cache files +ipch/ +*.aps +*.ncb +*.opendb +*.opensdf +*.sdf +*.cachefile +*.VC.db +*.VC.VC.opendb + +# Visual Studio profiler +*.psess +*.vsp +*.vspx +*.sap + +# Visual Studio Trace Files +*.e2e + +# TFS 2012 Local Workspace +$tf/ + +# Guidance Automation Toolkit +*.gpState + +# ReSharper is a .NET coding add-in +_ReSharper*/ +*.[Rr]e[Ss]harper +*.DotSettings.user + +# TeamCity is a build add-in +_TeamCity* + +# DotCover is a Code Coverage Tool +*.dotCover + +# AxoCover is a Code Coverage Tool +.axoCover/* +!.axoCover/settings.json + +# Coverlet is a free, cross platform Code Coverage Tool +coverage*.json +coverage*.xml +coverage*.info + +# Visual Studio code coverage results +*.coverage +*.coveragexml + +# NCrunch +_NCrunch_* +.*crunch*.local.xml +nCrunchTemp_* + +# MightyMoose +*.mm.* +AutoTest.Net/ + +# Web workbench (sass) +.sass-cache/ + +# Installshield output folder +[Ee]xpress/ + +# DocProject is a documentation generator add-in +DocProject/buildhelp/ +DocProject/Help/*.HxT +DocProject/Help/*.HxC +DocProject/Help/*.hhc +DocProject/Help/*.hhk +DocProject/Help/*.hhp +DocProject/Help/Html2 +DocProject/Help/html + +# Click-Once directory +publish/ + +# Publish Web Output +*.[Pp]ublish.xml +*.azurePubxml +# Note: Comment the next line if you want to checkin your web deploy settings, +# but database connection strings (with potential passwords) will be unencrypted +*.pubxml +*.publishproj + +# Microsoft Azure Web App publish settings. Comment the next line if you want to +# checkin your Azure Web App publish settings, but sensitive information contained +# in these scripts will be unencrypted +PublishScripts/ + +# NuGet Packages +*.nupkg +# NuGet Symbol Packages +*.snupkg +# The packages folder can be ignored because of Package Restore +**/[Pp]ackages/* +# except build/, which is used as an MSBuild target. 
+!**/[Pp]ackages/build/ +# Uncomment if necessary however generally it will be regenerated when needed +#!**/[Pp]ackages/repositories.config +# NuGet v3's project.json files produces more ignorable files +*.nuget.props +*.nuget.targets + +# Microsoft Azure Build Output +csx/ +*.build.csdef + +# Microsoft Azure Emulator +ecf/ +rcf/ + +# Windows Store app package directories and files +AppPackages/ +BundleArtifacts/ +Package.StoreAssociation.xml +_pkginfo.txt +*.appx +*.appxbundle +*.appxupload + +# Visual Studio cache files +# files ending in .cache can be ignored +*.[Cc]ache +# but keep track of directories ending in .cache +!?*.[Cc]ache/ + +# Others +ClientBin/ +~$* +*.dbmdl +*.dbproj.schemaview +*.jfm +*.pfx +*.publishsettings +orleans.codegen.cs + +# Including strong name files can present a security risk +# (https://github.com/github/gitignore/pull/2483#issue-259490424) +#*.snk + +# Since there are multiple workflows, uncomment next line to ignore bower_components +# (https://github.com/github/gitignore/pull/1529#issuecomment-104372622) +#bower_components/ + +# RIA/Silverlight projects +Generated_Code/ + +# Backup & report files from converting an old project file +# to a newer Visual Studio version. Backup files are not needed, +# because we have git ;-) +_UpgradeReport_Files/ +Backup*/ +UpgradeLog*.XML +UpgradeLog*.htm +ServiceFabricBackup/ +*.rptproj.bak + +# SQL Server files +*.mdf +*.ldf +*.ndf + +# Business Intelligence projects +*.rdl.data +*.bim.layout +*.bim_*.settings +*.rptproj.rsuser +*- [Bb]ackup.rdl +*- [Bb]ackup ([0-9]).rdl +*- [Bb]ackup ([0-9][0-9]).rdl + +# Microsoft Fakes +FakesAssemblies/ + +# GhostDoc plugin setting file +*.GhostDoc.xml + +# Node.js Tools for Visual Studio +.ntvs_analysis.dat +node_modules/ + +# Visual Studio 6 build log +*.plg + +# Visual Studio 6 workspace options file +*.opt + +# Visual Studio 6 auto-generated workspace file (contains which files were open etc.) 
+*.vbw + +# Visual Studio 6 auto-generated project file (contains which files were open etc.) +*.vbp + +# Visual Studio 6 workspace and project file (working project files containing files to include in project) +*.dsw +*.dsp + +# Visual Studio 6 technical files + +# Visual Studio LightSwitch build output +**/*.HTMLClient/GeneratedArtifacts +**/*.DesktopClient/GeneratedArtifacts +**/*.DesktopClient/ModelManifest.xml +**/*.Server/GeneratedArtifacts +**/*.Server/ModelManifest.xml +_Pvt_Extensions + +# Paket dependency manager +.paket/paket.exe +paket-files/ + +# FAKE - F# Make +.fake/ + +# CodeRush personal settings +.cr/personal + +# Python Tools for Visual Studio (PTVS) + +# Cake - Uncomment if you are using it +# tools/** +# !tools/packages.config + +# Tabs Studio +*.tss + +# Telerik's JustMock configuration file +*.jmconfig + +# BizTalk build output +*.btp.cs +*.btm.cs +*.odx.cs +*.xsd.cs + +# OpenCover UI analysis results +OpenCover/ + +# Azure Stream Analytics local run output +ASALocalRun/ + +# MSBuild Binary and Structured Log +*.binlog + +# NVidia Nsight GPU debugger configuration file +*.nvuser + +# MFractors (Xamarin productivity tool) working folder +.mfractor/ + +# Local History for Visual Studio +.localhistory/ + +# Visual Studio History (VSHistory) files +.vshistory/ + +# BeatPulse healthcheck temp database +healthchecksdb + +# Backup folder for Package Reference Convert tool in Visual Studio 2017 +MigrationBackup/ + +# Ionide (cross platform F# VS Code tools) working folder +.ionide/ + +# Fody - auto-generated XML schema +FodyWeavers.xsd + +# VS Code files for those working on multiple tools +*.code-workspace + +# Local History for Visual Studio Code + +# Windows Installer files from build outputs + +# JetBrains Rider +*.sln.iml + +### VisualStudio Patch ### +# Additional files built by Visual Studio + +# End of 
https://www.toptal.com/developers/gitignore/api/linux,archlinuxpackages,osx,windows,python,c,django,database,pycharm,visualstudio,visualstudiocode,vim,zsh,git,diff,microsoftoffice,spreadsheet,ssh,certificates diff --git a/stac_model/.pre-commit-config.yaml b/.pre-commit-config.yaml similarity index 100% rename from stac_model/.pre-commit-config.yaml rename to .pre-commit-config.yaml diff --git a/stac_model/CONTRIBUTING.md b/CONTRIBUTING_STAC_MODEL.md similarity index 98% rename from stac_model/CONTRIBUTING.md rename to CONTRIBUTING_STAC_MODEL.md index 89eda26..f9522f7 100644 --- a/stac_model/CONTRIBUTING.md +++ b/CONTRIBUTING_STAC_MODEL.md @@ -1,4 +1,4 @@ -# How to contribute +# How to contribute to stac-model ### Project setup diff --git a/stac_model/Makefile b/Makefile similarity index 100% rename from stac_model/Makefile rename to Makefile diff --git a/README.md b/README.md index 71d291d..c682776 100644 --- a/README.md +++ b/README.md @@ -204,11 +204,11 @@ STAC Collections and Items published with the model described by this extension. #### Result Array Object -| Field Name | Type | Description | | -|------------|-----------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--| -| shape | [integer] | **REQUIRED.** Shape of the n-dimensional result array ($N \times H \times W$), possibly including a batch size dimension. The batch size dimension must either be greater than 0 or -1 to indicate an unspecified batch dimension size. | | -| dim_names | [string] | **REQUIRED.** The names of the above dimensions of the result array, ordered the same as this object's `shape` field. 
| | -| data_type | enum | **REQUIRED.** The data type of values in the n-dimensional array. For model outputs, this should be the data type of the result of the model inference without extra post processing. Use one of the [common metadata data types](https://github.com/radiantearth/stac-spec/blob/f9b3c59ba810541c9da70c5f8d39635f8cba7bcd/item-spec/common-metadata.md#data-types). | | +| Field Name | Type | Description | +|------------|-----------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| shape | [integer] | **REQUIRED.** Shape of the n-dimensional result array ($N \times H \times W$), possibly including a batch size dimension. The batch size dimension must either be greater than 0 or -1 to indicate an unspecified batch dimension size. | +| dim_names | [string] | **REQUIRED.** The names of the above dimensions of the result array, ordered the same as this object's `shape` field. | +| data_type | enum | **REQUIRED.** The data type of values in the n-dimensional array. For model outputs, this should be the data type of the result of the model inference without extra post processing. Use one of the [common metadata data types](https://github.com/radiantearth/stac-spec/blob/f9b3c59ba810541c9da70c5f8d39635f8cba7bcd/item-spec/common-metadata.md#data-types). 
| diff --git a/stac_model/README.md b/STAC_MODEL_README.md similarity index 100% rename from stac_model/README.md rename to STAC_MODEL_README.md diff --git a/stac_model/docker/Dockerfile b/docker/Dockerfile similarity index 100% rename from stac_model/docker/Dockerfile rename to docker/Dockerfile diff --git a/stac_model/docker/README.md b/docker/README.md similarity index 100% rename from stac_model/docker/README.md rename to docker/README.md diff --git a/stac_model/poetry.lock b/poetry.lock similarity index 100% rename from stac_model/poetry.lock rename to poetry.lock diff --git a/stac_model/pyproject.toml b/pyproject.toml similarity index 100% rename from stac_model/pyproject.toml rename to pyproject.toml diff --git a/stac_model/.gitignore b/stac_model/.gitignore deleted file mode 100644 index e4eed7d..0000000 --- a/stac_model/.gitignore +++ /dev/null @@ -1,1032 +0,0 @@ -### ArchLinuxPackages ### -*.tar -*.tar.* -*.jar -*.exe -*.msi -*.zip -*.tgz -*.log -*.log.* -*.sig - -pkg/ -src/ - -### C ### -# Prerequisites -*.d - -# Object files -*.o -*.ko -*.obj -*.elf - -# Linker output -*.ilk -*.map -*.exp - -# Precompiled Headers -*.gch -*.pch - -# Libraries -*.lib -*.a -*.la -*.lo - -# Shared objects (inc. Windows DLLs) -*.dll -*.so -*.so.* -*.dylib - -# Executables -*.out -*.app -*.i*86 -*.x86_64 -*.hex - -# Debug files -*.dSYM/ -*.su -*.idb -*.pdb - -# Kernel Module Compile Results -*.mod* -*.cmd -.tmp_versions/ -modules.order -Module.symvers -Mkfile.old -dkms.conf - -### certificates ### -*.pem -*.key -*.crt -*.cer -*.der -*.priv - -### Database ### -*.accdb -*.db -*.dbf -*.mdb -*.sqlite3 -*.db-shm -*.db-wal - -### Diff ### -*.patch -*.diff - -### Django ### -*.pot -*.pyc -__pycache__/ -local_settings.py -db.sqlite3 -db.sqlite3-journal -media - -# If your build process includes running collectstatic, then you probably don't need or want to include staticfiles/ -# in your Git repository. Update and uncomment the following line accordingly. 
-# /staticfiles/ - -### Django.Python Stack ### -# Byte-compiled / optimized / DLL files -*.py[cod] -*$py.class - -# C extensions - -# Distribution / packaging -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -share/python-wheels/ -*.egg-info/ -.installed.cfg -*.egg -MANIFEST - -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. -*.manifest -*.spec - -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -htmlcov/ -.tox/ -.nox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*.cover -*.py,cover -.hypothesis/ -.pytest_cache/ -cover/ - -# Translations -*.mo - -# Django stuff: - -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy - -# Sphinx documentation -docs/_build/ - -# PyBuilder -.pybuilder/ -target/ - -# Jupyter Notebook -.ipynb_checkpoints - -# IPython -profile_default/ -ipython_config.py - -# pyenv -# For a library or package, you might want to ignore these files since the code is -# intended to run in multiple environments; otherwise, check them in: -# .python-version - -# pipenv -# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. -# However, in case of collaboration, if having platform-specific dependencies or dependencies -# having no cross-platform support, pipenv may install dependencies that don't work, or not -# install all needed dependencies. -#Pipfile.lock - -# poetry -# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. -# This is especially recommended for binary packages to ensure reproducibility, and is more -# commonly ignored for libraries. 
-# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control -#poetry.lock - -# pdm -# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. -#pdm.lock -# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it -# in version control. -# https://pdm.fming.dev/#use-with-ide -.pdm.toml - -# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm -__pypackages__/ - -# Celery stuff -celerybeat-schedule -celerybeat.pid - -# SageMath parsed files -*.sage.py - -# Environments -.env -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# Spyder project settings -.spyderproject -.spyproject - -# Rope project settings -.ropeproject - -# mkdocs documentation -/site - -# mypy -.mypy_cache/ -.dmypy.json -dmypy.json - -# Pyre type checker -.pyre/ - -# pytype static type analyzer -.pytype/ - -# Cython debug symbols -cython_debug/ - -# PyCharm -# JetBrains specific template is maintained in a separate JetBrains.gitignore that can -# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore -# and can be added to the global gitignore or merged into this file. For a more nuclear -# option (not recommended) you can uncomment the following to ignore the entire idea folder. -#.idea/ - -### Git ### -# Created by git for backups. 
To disable backups in Git: -# $ git config --global mergetool.keepBackup false -*.orig - -# Created by git when using merge tools for conflicts -*.BACKUP.* -*.BASE.* -*.LOCAL.* -*.REMOTE.* -*_BACKUP_*.txt -*_BASE_*.txt -*_LOCAL_*.txt -*_REMOTE_*.txt - -### Linux ### -*~ - -# temporary files which can be created if a process still has a handle open of a deleted file -.fuse_hidden* - -# KDE directory preferences -.directory - -# Linux trash folder which might appear on any partition or disk -.Trash-* - -# .nfs files are created when an open file is removed but is still being accessed -.nfs* - -### MicrosoftOffice ### -*.tmp - -# Word temporary -~$*.doc* - -# Word Auto Backup File -Backup of *.doc* - -# Excel temporary -~$*.xls* - -# Excel Backup File -*.xlk - -# PowerPoint temporary -~$*.ppt* - -# Visio autosave temporary files -*.~vsd* - -### OSX ### -# General -.DS_Store -.AppleDouble -.LSOverride - -# Icon must end with two \r -Icon - -# Thumbnails -._* - -# Files that might appear in the root of a volume -.DocumentRevisions-V100 -.fseventsd -.Spotlight-V100 -.TemporaryItems -.Trashes -.VolumeIcon.icns -.com.apple.timemachine.donotpresent - -# Directories potentially created on remote AFP share -.AppleDB -.AppleDesktop -Network Trash Folder -Temporary Items -.apdisk - -### PyCharm ### -# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider -# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 - -# User-specific stuff -.idea/**/workspace.xml -.idea/**/tasks.xml -.idea/**/usage.statistics.xml -.idea/**/dictionaries -.idea/**/shelf - -# AWS User-specific -.idea/**/aws.xml - -# Generated files -.idea/**/contentModel.xml - -# Sensitive or high-churn files -.idea/**/dataSources/ -.idea/**/dataSources.ids -.idea/**/dataSources.local.xml -.idea/**/sqlDataSources.xml -.idea/**/dynamic.xml -.idea/**/uiDesigner.xml -.idea/**/dbnavigator.xml - -# Gradle -.idea/**/gradle.xml 
-.idea/**/libraries - -# Gradle and Maven with auto-import -# When using Gradle or Maven with auto-import, you should exclude module files, -# since they will be recreated, and may cause churn. Uncomment if using -# auto-import. -# .idea/artifacts -# .idea/compiler.xml -# .idea/jarRepositories.xml -# .idea/modules.xml -# .idea/*.iml -# .idea/modules -# *.iml -# *.ipr - -# CMake -cmake-build-*/ - -# Mongo Explorer plugin -.idea/**/mongoSettings.xml - -# File-based project format -*.iws - -# IntelliJ -out/ - -# mpeltonen/sbt-idea plugin -.idea_modules/ - -# JIRA plugin -atlassian-ide-plugin.xml - -# Cursive Clojure plugin -.idea/replstate.xml - -# SonarLint plugin -.idea/sonarlint/ - -# Crashlytics plugin (for Android Studio and IntelliJ) -com_crashlytics_export_strings.xml -crashlytics.properties -crashlytics-build.properties -fabric.properties - -# Editor-based Rest Client -.idea/httpRequests - -# Android studio 3.1+ serialized cache file -.idea/caches/build_file_checksums.ser - -### PyCharm Patch ### -# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721 - -# *.iml -# modules.xml -# .idea/misc.xml -# *.ipr - -# Sonarlint plugin -# https://plugins.jetbrains.com/plugin/7973-sonarlint -.idea/**/sonarlint/ - -# SonarQube Plugin -# https://plugins.jetbrains.com/plugin/7238-sonarqube-community-plugin -.idea/**/sonarIssues.xml - -# Markdown Navigator plugin -# https://plugins.jetbrains.com/plugin/7896-markdown-navigator-enhanced -.idea/**/markdown-navigator.xml -.idea/**/markdown-navigator-enh.xml -.idea/**/markdown-navigator/ - -# Cache file creation bug -# See https://youtrack.jetbrains.com/issue/JBR-2257 -.idea/$CACHE_FILE$ - -# CodeStream plugin -# https://plugins.jetbrains.com/plugin/12206-codestream -.idea/codestream.xml - -# Azure Toolkit for IntelliJ plugin -# https://plugins.jetbrains.com/plugin/8053-azure-toolkit-for-intellij -.idea/**/azureSettings.xml - -### Python ### -# Byte-compiled / optimized / DLL files - -# C 
extensions - -# Distribution / packaging - -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. - -# Installer logs - -# Unit test / coverage reports - -# Translations - -# Django stuff: - -# Flask stuff: - -# Scrapy stuff: - -# Sphinx documentation - -# PyBuilder - -# Jupyter Notebook - -# IPython - -# pyenv -# For a library or package, you might want to ignore these files since the code is -# intended to run in multiple environments; otherwise, check them in: -# .python-version - -# pipenv -# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. -# However, in case of collaboration, if having platform-specific dependencies or dependencies -# having no cross-platform support, pipenv may install dependencies that don't work, or not -# install all needed dependencies. - -# poetry -# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. -# This is especially recommended for binary packages to ensure reproducibility, and is more -# commonly ignored for libraries. -# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control - -# pdm -# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. -# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it -# in version control. -# https://pdm.fming.dev/#use-with-ide - -# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm - -# Celery stuff - -# SageMath parsed files - -# Environments - -# Spyder project settings - -# Rope project settings - -# mkdocs documentation - -# mypy - -# Pyre type checker - -# pytype static type analyzer - -# Cython debug symbols - -# PyCharm -# JetBrains specific template is maintained in a separate JetBrains.gitignore that can -# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore -# and can be added to the global gitignore or merged into this file. For a more nuclear -# option (not recommended) you can uncomment the following to ignore the entire idea folder. - -### Python Patch ### -# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration -poetry.toml - -# ruff -.ruff_cache/ - -# LSP config files -pyrightconfig.json - -### Spreadsheet ### -*.xlr -*.xls -*.xlsx - -### SSH ### -**/.ssh/id_* -**/.ssh/*_id_* -**/.ssh/known_hosts - -### Vim ### -# Swap -[._]*.s[a-v][a-z] -!*.svg # comment out if you don't need vector files -[._]*.sw[a-p] -[._]s[a-rt-v][a-z] -[._]ss[a-gi-z] -[._]sw[a-p] - -# Session -Session.vim -Sessionx.vim - -# Temporary -.netrwhist -# Auto-generated tag files -tags -# Persistent undo -[._]*.un~ - -### VisualStudioCode ### -.vscode/* -!.vscode/settings.json -!.vscode/tasks.json -!.vscode/launch.json -!.vscode/extensions.json -!.vscode/*.code-snippets - -# Local History for Visual Studio Code -.history/ - -# Built Visual Studio Code Extensions -*.vsix - -### VisualStudioCode Patch ### -# Ignore all local history of files -.history -.ionide - -### Windows ### -# Windows thumbnail cache files -Thumbs.db -Thumbs.db:encryptable -ehthumbs.db -ehthumbs_vista.db - -# Dump file -*.stackdump - -# Folder config file -[Dd]esktop.ini - -# Recycle Bin used on file shares -$RECYCLE.BIN/ - -# Windows Installer files -*.cab -*.msix -*.msm -*.msp - -# Windows shortcuts -*.lnk - -### Zsh ### -# Zsh compiled script + 
zrecompile backup -*.zwc -*.zwc.old - -# Zsh completion-optimization dumpfile -*zcompdump* - -# Zsh history -.zsh_history - -# Zsh sessions -.zsh_sessions - -# Zsh zcalc history -.zcalc_history - -# A popular plugin manager's files -._zinit -.zinit_lstupd - -# zdharma/zshelldoc tool's files -zsdoc/data - -# robbyrussell/oh-my-zsh/plugins/per-directory-history plugin's files -# (when set-up to store the history in the local directory) -.directory_history - -# MichaelAquilina/zsh-autoswitch-virtualenv plugin's files -# (for Zsh plugins using Python) - -# Zunit tests' output -/tests/_output/* -!/tests/_output/.gitkeep - -### VisualStudio ### -## Ignore Visual Studio temporary files, build results, and -## files generated by popular Visual Studio add-ons. -## -## Get latest from https://github.com/github/gitignore/blob/main/VisualStudio.gitignore - -# User-specific files -*.rsuser -*.suo -*.user -*.userosscache -*.sln.docstates - -# User-specific files (MonoDevelop/Xamarin Studio) -*.userprefs - -# Mono auto generated files -mono_crash.* - -# Build results -[Dd]ebug/ -[Dd]ebugPublic/ -[Rr]elease/ -[Rr]eleases/ -x64/ -x86/ -[Ww][Ii][Nn]32/ -[Aa][Rr][Mm]/ -[Aa][Rr][Mm]64/ -bld/ -[Bb]in/ -[Oo]bj/ -[Ll]og/ -[Ll]ogs/ - -# Visual Studio 2015/2017 cache/options directory -.vs/ -# Uncomment if you have tasks that create the project's static files in wwwroot -#wwwroot/ - -# Visual Studio 2017 auto generated files -Generated\ Files/ - -# MSTest test Results -[Tt]est[Rr]esult*/ -[Bb]uild[Ll]og.* - -# NUnit -*.VisualState.xml -TestResult.xml -nunit-*.xml - -# Build Results of an ATL Project -[Dd]ebugPS/ -[Rr]eleasePS/ -dlldata.c - -# Benchmark Results -BenchmarkDotNet.Artifacts/ - -# .NET Core -project.lock.json -project.fragment.lock.json -artifacts/ - -# ASP.NET Scaffolding -ScaffoldingReadMe.txt - -# StyleCop -StyleCopReport.xml - -# Files built by Visual Studio -*_i.c -*_p.c -*_h.h -*.meta -*.iobj -*.ipdb -*.pgc -*.pgd -*.rsp -*.sbr -*.tlb -*.tli -*.tlh -*.tmp_proj 
-*_wpftmp.csproj -*.tlog -*.vspscc -*.vssscc -.builds -*.pidb -*.svclog -*.scc - -# Chutzpah Test files -_Chutzpah* - -# Visual C++ cache files -ipch/ -*.aps -*.ncb -*.opendb -*.opensdf -*.sdf -*.cachefile -*.VC.db -*.VC.VC.opendb - -# Visual Studio profiler -*.psess -*.vsp -*.vspx -*.sap - -# Visual Studio Trace Files -*.e2e - -# TFS 2012 Local Workspace -$tf/ - -# Guidance Automation Toolkit -*.gpState - -# ReSharper is a .NET coding add-in -_ReSharper*/ -*.[Rr]e[Ss]harper -*.DotSettings.user - -# TeamCity is a build add-in -_TeamCity* - -# DotCover is a Code Coverage Tool -*.dotCover - -# AxoCover is a Code Coverage Tool -.axoCover/* -!.axoCover/settings.json - -# Coverlet is a free, cross platform Code Coverage Tool -coverage*.json -coverage*.xml -coverage*.info - -# Visual Studio code coverage results -*.coverage -*.coveragexml - -# NCrunch -_NCrunch_* -.*crunch*.local.xml -nCrunchTemp_* - -# MightyMoose -*.mm.* -AutoTest.Net/ - -# Web workbench (sass) -.sass-cache/ - -# Installshield output folder -[Ee]xpress/ - -# DocProject is a documentation generator add-in -DocProject/buildhelp/ -DocProject/Help/*.HxT -DocProject/Help/*.HxC -DocProject/Help/*.hhc -DocProject/Help/*.hhk -DocProject/Help/*.hhp -DocProject/Help/Html2 -DocProject/Help/html - -# Click-Once directory -publish/ - -# Publish Web Output -*.[Pp]ublish.xml -*.azurePubxml -# Note: Comment the next line if you want to checkin your web deploy settings, -# but database connection strings (with potential passwords) will be unencrypted -*.pubxml -*.publishproj - -# Microsoft Azure Web App publish settings. Comment the next line if you want to -# checkin your Azure Web App publish settings, but sensitive information contained -# in these scripts will be unencrypted -PublishScripts/ - -# NuGet Packages -*.nupkg -# NuGet Symbol Packages -*.snupkg -# The packages folder can be ignored because of Package Restore -**/[Pp]ackages/* -# except build/, which is used as an MSBuild target. 
-!**/[Pp]ackages/build/ -# Uncomment if necessary however generally it will be regenerated when needed -#!**/[Pp]ackages/repositories.config -# NuGet v3's project.json files produces more ignorable files -*.nuget.props -*.nuget.targets - -# Microsoft Azure Build Output -csx/ -*.build.csdef - -# Microsoft Azure Emulator -ecf/ -rcf/ - -# Windows Store app package directories and files -AppPackages/ -BundleArtifacts/ -Package.StoreAssociation.xml -_pkginfo.txt -*.appx -*.appxbundle -*.appxupload - -# Visual Studio cache files -# files ending in .cache can be ignored -*.[Cc]ache -# but keep track of directories ending in .cache -!?*.[Cc]ache/ - -# Others -ClientBin/ -~$* -*.dbmdl -*.dbproj.schemaview -*.jfm -*.pfx -*.publishsettings -orleans.codegen.cs - -# Including strong name files can present a security risk -# (https://github.com/github/gitignore/pull/2483#issue-259490424) -#*.snk - -# Since there are multiple workflows, uncomment next line to ignore bower_components -# (https://github.com/github/gitignore/pull/1529#issuecomment-104372622) -#bower_components/ - -# RIA/Silverlight projects -Generated_Code/ - -# Backup & report files from converting an old project file -# to a newer Visual Studio version. Backup files are not needed, -# because we have git ;-) -_UpgradeReport_Files/ -Backup*/ -UpgradeLog*.XML -UpgradeLog*.htm -ServiceFabricBackup/ -*.rptproj.bak - -# SQL Server files -*.mdf -*.ldf -*.ndf - -# Business Intelligence projects -*.rdl.data -*.bim.layout -*.bim_*.settings -*.rptproj.rsuser -*- [Bb]ackup.rdl -*- [Bb]ackup ([0-9]).rdl -*- [Bb]ackup ([0-9][0-9]).rdl - -# Microsoft Fakes -FakesAssemblies/ - -# GhostDoc plugin setting file -*.GhostDoc.xml - -# Node.js Tools for Visual Studio -.ntvs_analysis.dat -node_modules/ - -# Visual Studio 6 build log -*.plg - -# Visual Studio 6 workspace options file -*.opt - -# Visual Studio 6 auto-generated workspace file (contains which files were open etc.) 
-*.vbw - -# Visual Studio 6 auto-generated project file (contains which files were open etc.) -*.vbp - -# Visual Studio 6 workspace and project file (working project files containing files to include in project) -*.dsw -*.dsp - -# Visual Studio 6 technical files - -# Visual Studio LightSwitch build output -**/*.HTMLClient/GeneratedArtifacts -**/*.DesktopClient/GeneratedArtifacts -**/*.DesktopClient/ModelManifest.xml -**/*.Server/GeneratedArtifacts -**/*.Server/ModelManifest.xml -_Pvt_Extensions - -# Paket dependency manager -.paket/paket.exe -paket-files/ - -# FAKE - F# Make -.fake/ - -# CodeRush personal settings -.cr/personal - -# Python Tools for Visual Studio (PTVS) - -# Cake - Uncomment if you are using it -# tools/** -# !tools/packages.config - -# Tabs Studio -*.tss - -# Telerik's JustMock configuration file -*.jmconfig - -# BizTalk build output -*.btp.cs -*.btm.cs -*.odx.cs -*.xsd.cs - -# OpenCover UI analysis results -OpenCover/ - -# Azure Stream Analytics local run output -ASALocalRun/ - -# MSBuild Binary and Structured Log -*.binlog - -# NVidia Nsight GPU debugger configuration file -*.nvuser - -# MFractors (Xamarin productivity tool) working folder -.mfractor/ - -# Local History for Visual Studio -.localhistory/ - -# Visual Studio History (VSHistory) files -.vshistory/ - -# BeatPulse healthcheck temp database -healthchecksdb - -# Backup folder for Package Reference Convert tool in Visual Studio 2017 -MigrationBackup/ - -# Ionide (cross platform F# VS Code tools) working folder -.ionide/ - -# Fody - auto-generated XML schema -FodyWeavers.xsd - -# VS Code files for those working on multiple tools -*.code-workspace - -# Local History for Visual Studio Code - -# Windows Installer files from build outputs - -# JetBrains Rider -*.sln.iml - -### VisualStudio Patch ### -# Additional files built by Visual Studio - -# End of 
https://www.toptal.com/developers/gitignore/api/linux,archlinuxpackages,osx,windows,python,c,django,database,pycharm,visualstudio,visualstudiocode,vim,zsh,git,diff,microsoftoffice,spreadsheet,ssh,certificates diff --git a/stac_model/AUTHORS.md b/stac_model/AUTHORS.md deleted file mode 100644 index 7a4332f..0000000 --- a/stac_model/AUTHORS.md +++ /dev/null @@ -1,9 +0,0 @@ -# Credits - -## Main Developer - -- Ryan Avery - -## Contributors - -We don't have contributors... yet. Why not be the first? diff --git a/stac_model/CHANGELOG.md b/stac_model/CHANGELOG.md deleted file mode 100644 index e69de29..0000000 diff --git a/stac_model/LICENSE b/stac_model/LICENSE deleted file mode 100644 index 938525f..0000000 --- a/stac_model/LICENSE +++ /dev/null @@ -1,207 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, and - distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by the - copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all other - entities that control, are controlled by, or are under common control with - that entity. For the purposes of this definition, "control" means (i) the - power, direct or indirect, to cause the direction or management of such - entity, whether by contract or otherwise, or (ii) ownership of - fifty percent (50%) or more of the outstanding shares, or (iii) beneficial - ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity exercising - permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation source, - and configuration files. 
- - "Object" form shall mean any form resulting from mechanical transformation - or translation of a Source form, including but not limited to compiled - object code, generated documentation, and conversions to - other media types. - - "Work" shall mean the work of authorship, whether in Source or Object - form, made available under the License, as indicated by a copyright notice - that is included in or attached to the work (an example is provided in the - Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object form, - that is based on (or derived from) the Work and for which the editorial - revisions, annotations, elaborations, or other modifications represent, - as a whole, an original work of authorship. For the purposes of this - License, Derivative Works shall not include works that remain separable - from, or merely link (or bind by name) to the interfaces of, the Work and - Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including the original - version of the Work and any modifications or additions to that Work or - Derivative Works thereof, that is intentionally submitted to Licensor for - inclusion in the Work by the copyright owner or by an individual or - Legal Entity authorized to submit on behalf of the copyright owner. - For the purposes of this definition, "submitted" means any form of - electronic, verbal, or written communication sent to the Licensor or its - representatives, including but not limited to communication on electronic - mailing lists, source code control systems, and issue tracking systems - that are managed by, or on behalf of, the Licensor for the purpose of - discussing and improving the Work, but excluding communication that is - conspicuously marked or otherwise designated in writing by the copyright - owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity on - behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. - - Subject to the terms and conditions of this License, each Contributor - hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, - royalty-free, irrevocable copyright license to reproduce, prepare - Derivative Works of, publicly display, publicly perform, sublicense, - and distribute the Work and such Derivative Works in - Source or Object form. - -3. Grant of Patent License. - - Subject to the terms and conditions of this License, each Contributor - hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, - royalty-free, irrevocable (except as stated in this section) patent - license to make, have made, use, offer to sell, sell, import, and - otherwise transfer the Work, where such license applies only to those - patent claims licensable by such Contributor that are necessarily - infringed by their Contribution(s) alone or by combination of their - Contribution(s) with the Work to which such Contribution(s) was submitted. - If You institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work or a - Contribution incorporated within the Work constitutes direct or - contributory patent infringement, then any patent licenses granted to - You under this License for that Work shall terminate as of the date such - litigation is filed. - -4. Redistribution. - - You may reproduce and distribute copies of the Work or Derivative Works - thereof in any medium, with or without modifications, and in Source or - Object form, provided that You meet the following conditions: - - 1. You must give any other recipients of the Work or Derivative Works a - copy of this License; and - - 2. 
You must cause any modified files to carry prominent notices stating - that You changed the files; and - - 3. You must retain, in the Source form of any Derivative Works that You - distribute, all copyright, patent, trademark, and attribution notices from - the Source form of the Work, excluding those notices that do not pertain - to any part of the Derivative Works; and - - 4. If the Work includes a "NOTICE" text file as part of its distribution, - then any Derivative Works that You distribute must include a readable copy - of the attribution notices contained within such NOTICE file, excluding - those notices that do not pertain to any part of the Derivative Works, - in at least one of the following places: within a NOTICE text file - distributed as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, within a - display generated by the Derivative Works, if and wherever such - third-party notices normally appear. The contents of the NOTICE file are - for informational purposes only and do not modify the License. - You may add Your own attribution notices within Derivative Works that You - distribute, alongside or as an addendum to the NOTICE text from the Work, - provided that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and may - provide additional or different license terms and conditions for use, - reproduction, or distribution of Your modifications, or for any such - Derivative Works as a whole, provided Your use, reproduction, and - distribution of the Work otherwise complies with the conditions - stated in this License. - -5. Submission of Contributions. 
- - Unless You explicitly state otherwise, any Contribution intentionally - submitted for inclusion in the Work by You to the Licensor shall be under - the terms and conditions of this License, without any additional - terms or conditions. Notwithstanding the above, nothing herein shall - supersede or modify the terms of any separate license agreement you may - have executed with Licensor regarding such Contributions. - -6. Trademarks. - - This License does not grant permission to use the trade names, trademarks, - service marks, or product names of the Licensor, except as required for - reasonable and customary use in describing the origin of the Work and - reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - - Unless required by applicable law or agreed to in writing, Licensor - provides the Work (and each Contributor provides its Contributions) - on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - either express or implied, including, without limitation, any warranties - or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS - FOR A PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any risks - associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
- - In no event and under no legal theory, whether in tort - (including negligence), contract, or otherwise, unless required by - applicable law (such as deliberate and grossly negligent acts) or agreed - to in writing, shall any Contributor be liable to You for damages, - including any direct, indirect, special, incidental, or consequential - damages of any character arising as a result of this License or out of - the use or inability to use the Work (including but not limited to damages - for loss of goodwill, work stoppage, computer failure or malfunction, - or any and all other commercial damages or losses), even if such - Contributor has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - - While redistributing the Work or Derivative Works thereof, You may choose - to offer, and charge a fee for, acceptance of support, warranty, - indemnity, or other liability obligations and/or rights consistent with - this License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf of any - other Contributor, and only if You agree to indemnify, defend, and hold - each Contributor harmless for any liability incurred by, or claims - asserted against, such Contributor by reason of your accepting any such - warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - - To apply the Apache License to your work, attach the following boilerplate - notice, with the fields enclosed by brackets "[]" replaced with your own - identifying information. (Don't include the brackets!) The text should be - enclosed in the appropriate comment syntax for the file format. We also - recommend that a file or class name and description of purpose be included - on the same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright 2023 Ryan Avery - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - or implied. See the License for the specific language governing - permissions and limitations under the License. diff --git a/stac_model/SECURITY.md b/stac_model/SECURITY.md deleted file mode 100644 index 9ca2669..0000000 --- a/stac_model/SECURITY.md +++ /dev/null @@ -1,29 +0,0 @@ -# Security - -## :closed_lock_with_key: Reporting Security Issues - -> Do not open issues that might have security implications! -> It is critical that security related issues are reported privately so we have time to address them before they become public knowledge. - -Vulnerabilities can be reported by emailing core members: - -- Ryan Avery <[ryan@wherobots.com][1]> - -Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: - -- Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) -- Full paths of source file(s) related to the manifestation of the issue -- The location of the affected source code (tag/branch/commit or direct URL) -- Any special configuration required to reproduce the issue -- Environment (e.g. Linux / Windows / macOS) -- Step-by-step instructions to reproduce the issue -- Proof-of-concept or exploit code (if possible) -- Impact of the issue, including how an attacker might exploit the issue - -This information will help us triage your report more quickly. - -## Preferred Languages - -We prefer all communications to be in English. 
- -[1]: mailto:ryan@wherobots.com diff --git a/stac_model/stac_model/__init__.py b/stac_model/__init__.py similarity index 100% rename from stac_model/stac_model/__init__.py rename to stac_model/__init__.py diff --git a/stac_model/stac_model/__main__.py b/stac_model/__main__.py similarity index 100% rename from stac_model/stac_model/__main__.py rename to stac_model/__main__.py diff --git a/stac_model/docs/.gitkeep b/stac_model/docs/.gitkeep deleted file mode 100644 index e69de29..0000000 diff --git a/stac_model/example.json b/stac_model/example.json deleted file mode 100644 index ef32350..0000000 --- a/stac_model/example.json +++ /dev/null @@ -1,159 +0,0 @@ -{ - "mlm:name": "Resnet-18 Sentinel-2 ALL MOCO", - "mlm:task": "classification", - "mlm:framework": "pytorch", - "mlm:framework_version": "2.1.2+cu121", - "mlm:file_size": 1, - "mlm:memory_size": 1, - "mlm:input": [ - { - "name": "13 Band Sentinel-2 Batch", - "bands": [ - "B01", - "B02", - "B03", - "B04", - "B05", - "B06", - "B07", - "B08", - "B8A", - "B09", - "B10", - "B11", - "B12" - ], - "input_array": { - "shape": [ - -1, - 13, - 64, - 64 - ], - "dim_order": "bchw", - "data_type": "float32" - }, - "norm_by_channel": true, - "norm_type": "z_score", - "statistics": { - "mean": [ - 1354.40546513, - 1118.24399958, - 1042.92983953, - 947.62620298, - 1199.47283961, - 1999.79090914, - 2369.22292565, - 2296.82608323, - 732.08340178, - 12.11327804, - 1819.01027855, - 1118.92391149, - 2594.14080798 - ], - "stddev": [ - 245.71762908, - 333.00778264, - 395.09249139, - 593.75055589, - 566.4170017, - 861.18399006, - 1086.63139075, - 1117.98170791, - 404.91978886, - 4.77584468, - 1002.58768311, - 761.30323499, - 1231.58581042 - ] - }, - "pre_processing_function": "https://github.com/microsoft/torchgeo/blob/545abe8326efc2848feae69d0212a15faba3eb00/torchgeo/datamodules/eurosat.py" - } - ], - "mlm:output": [ - { - "task": "classification", - "result_array": [ - { - "shape": [ - -1, - 10 - ], - "dim_names": [ - "batch", - 
"class" - ], - "data_type": "float32" - } - ], - "classification_classes": [ - { - "value": 0, - "name": "Annual Crop", - "nodata": false - }, - { - "value": 1, - "name": "Forest", - "nodata": false - }, - { - "value": 2, - "name": "Herbaceous Vegetation", - "nodata": false - }, - { - "value": 3, - "name": "Highway", - "nodata": false - }, - { - "value": 4, - "name": "Industrial Buildings", - "nodata": false - }, - { - "value": 5, - "name": "Pasture", - "nodata": false - }, - { - "value": 6, - "name": "Permanent Crop", - "nodata": false - }, - { - "value": 7, - "name": "Residential Buildings", - "nodata": false - }, - { - "value": 8, - "name": "River", - "nodata": false - }, - { - "value": 9, - "name": "SeaLake", - "nodata": false - } - ] - } - ], - "mlm:runtime": [ - { - "asset": { - "href": "." - }, - "source_code": { - "href": "." - }, - "accelerator": "cuda", - "accelerator_constrained": false, - "hardware_summary": "Unknown" - } - ], - "mlm:total_parameters": 11700000, - "mlm:pretrained_source": "EuroSat Sentinel-2", - "mlm:summary": "Sourced from torchgeo python library,identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO" -} diff --git a/stac_model/stac_model/input.py b/stac_model/input.py similarity index 100% rename from stac_model/stac_model/input.py rename to stac_model/input.py diff --git a/stac_model/stac_model/output.py b/stac_model/output.py similarity index 100% rename from stac_model/stac_model/output.py rename to stac_model/output.py diff --git a/stac_model/stac_model/paths.py b/stac_model/paths.py similarity index 100% rename from stac_model/stac_model/paths.py rename to stac_model/paths.py diff --git a/stac_model/requirements.txt b/stac_model/requirements.txt deleted file mode 100644 index 6613a77..0000000 --- a/stac_model/requirements.txt +++ /dev/null @@ -1,12 +0,0 @@ -annotated-types==0.6.0 ; python_version >= "3.10" and python_version < "4.0" -click==8.1.7 ; python_version >= "3.10" and python_version < "4.0" -colorama==0.4.6 ; python_version 
>= "3.10" and python_version < "4.0" -commonmark==0.9.1 ; python_version >= "3.10" and python_version < "4.0" -numpy==1.26.2 ; python_version >= "3.10" and python_version < "4.0" -pydantic-core==2.6.3 ; python_version >= "3.10" and python_version < "4.0" -pydantic==2.3.0 ; python_version >= "3.10" and python_version < "4.0" -pygments==2.17.2 ; python_version >= "3.10" and python_version < "4.0" -rich==12.6.0 ; python_version >= "3.10" and python_version < "4.0" -shellingham==1.5.4 ; python_version >= "3.10" and python_version < "4.0" -typer[all]==0.7.0 ; python_version >= "3.10" and python_version < "4.0" -typing-extensions==4.9.0 ; python_version >= "3.10" and python_version < "4.0" diff --git a/stac_model/stac_model/runtime.py b/stac_model/runtime.py similarity index 100% rename from stac_model/stac_model/runtime.py rename to stac_model/runtime.py diff --git a/stac_model/stac_model/schema.py b/stac_model/schema.py similarity index 100% rename from stac_model/stac_model/schema.py rename to stac_model/schema.py diff --git a/stac_model/tests/test_schema.py b/tests/test_schema.py similarity index 100% rename from stac_model/tests/test_schema.py rename to tests/test_schema.py From 54a52facdce6185cc79e65923449c657a4cf8d73 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Sun, 25 Feb 2024 19:57:51 -0800 Subject: [PATCH 043/112] update test, make an examples module --- example.json | 159 +++++++++++++++++++++++++++++++++++++++++ poetry.lock | 8 +-- pyproject.toml | 4 +- stac_model/__main__.py | 135 ++-------------------------------- stac_model/examples.py | 132 ++++++++++++++++++++++++++++++++++ stac_model/runtime.py | 5 +- tests/test_schema.py | 58 ++------------- 7 files changed, 308 insertions(+), 193 deletions(-) create mode 100644 example.json create mode 100644 stac_model/examples.py diff --git a/example.json b/example.json new file mode 100644 index 0000000..ef32350 --- /dev/null +++ b/example.json @@ -0,0 +1,159 @@ +{ + "mlm:name": "Resnet-18 Sentinel-2 ALL MOCO", 
+ "mlm:task": "classification", + "mlm:framework": "pytorch", + "mlm:framework_version": "2.1.2+cu121", + "mlm:file_size": 1, + "mlm:memory_size": 1, + "mlm:input": [ + { + "name": "13 Band Sentinel-2 Batch", + "bands": [ + "B01", + "B02", + "B03", + "B04", + "B05", + "B06", + "B07", + "B08", + "B8A", + "B09", + "B10", + "B11", + "B12" + ], + "input_array": { + "shape": [ + -1, + 13, + 64, + 64 + ], + "dim_order": "bchw", + "data_type": "float32" + }, + "norm_by_channel": true, + "norm_type": "z_score", + "statistics": { + "mean": [ + 1354.40546513, + 1118.24399958, + 1042.92983953, + 947.62620298, + 1199.47283961, + 1999.79090914, + 2369.22292565, + 2296.82608323, + 732.08340178, + 12.11327804, + 1819.01027855, + 1118.92391149, + 2594.14080798 + ], + "stddev": [ + 245.71762908, + 333.00778264, + 395.09249139, + 593.75055589, + 566.4170017, + 861.18399006, + 1086.63139075, + 1117.98170791, + 404.91978886, + 4.77584468, + 1002.58768311, + 761.30323499, + 1231.58581042 + ] + }, + "pre_processing_function": "https://github.com/microsoft/torchgeo/blob/545abe8326efc2848feae69d0212a15faba3eb00/torchgeo/datamodules/eurosat.py" + } + ], + "mlm:output": [ + { + "task": "classification", + "result_array": [ + { + "shape": [ + -1, + 10 + ], + "dim_names": [ + "batch", + "class" + ], + "data_type": "float32" + } + ], + "classification_classes": [ + { + "value": 0, + "name": "Annual Crop", + "nodata": false + }, + { + "value": 1, + "name": "Forest", + "nodata": false + }, + { + "value": 2, + "name": "Herbaceous Vegetation", + "nodata": false + }, + { + "value": 3, + "name": "Highway", + "nodata": false + }, + { + "value": 4, + "name": "Industrial Buildings", + "nodata": false + }, + { + "value": 5, + "name": "Pasture", + "nodata": false + }, + { + "value": 6, + "name": "Permanent Crop", + "nodata": false + }, + { + "value": 7, + "name": "Residential Buildings", + "nodata": false + }, + { + "value": 8, + "name": "River", + "nodata": false + }, + { + "value": 9, + "name": 
"SeaLake", + "nodata": false + } + ] + } + ], + "mlm:runtime": [ + { + "asset": { + "href": "." + }, + "source_code": { + "href": "." + }, + "accelerator": "cuda", + "accelerator_constrained": false, + "hardware_summary": "Unknown" + } + ], + "mlm:total_parameters": 11700000, + "mlm:pretrained_source": "EuroSat Sentinel-2", + "mlm:summary": "Sourced from torchgeo python library,identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO" +} diff --git a/poetry.lock b/poetry.lock index 9c919a1..1cd5020 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1257,13 +1257,13 @@ test = ["black (>=22.3.0,<23.0.0)", "coverage (>=6.2,<7.0)", "isort (>=5.0.6,<6. [[package]] name = "typing-extensions" -version = "4.9.0" +version = "4.10.0" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" files = [ - {file = "typing_extensions-4.9.0-py3-none-any.whl", hash = "sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd"}, - {file = "typing_extensions-4.9.0.tar.gz", hash = "sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783"}, + {file = "typing_extensions-4.10.0-py3-none-any.whl", hash = "sha256:69b1a937c3a517342112fb4c6df7e72fc39a38e7891a5730ed4985b5214b5475"}, + {file = "typing_extensions-4.10.0.tar.gz", hash = "sha256:b0abd7c89e8fb96f98db18d86106ff1d90ab692004eb746cf6eda2682f91b3cb"}, ] [[package]] @@ -1306,4 +1306,4 @@ test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "2257cef332438dffd08c915ab1b6ca0c6c456d5ac513cfff94896039e09b61fa" +content-hash = "4e55abe2ad0d4e1327f95a950f3ca8a4fefb7c8d02a95ac6da7bde435abe8e9a" diff --git a/pyproject.toml b/pyproject.toml index f206765..78ec295 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -55,8 +55,8 @@ python = "^3.10" typer = {extras = ["all"], version = "^0.9.0"} rich = "^13.7.0" -pydantic = "^2.5.0" # bug in post 2.3 
https://github.com/pydantic/pydantic/issues/7720 -pydantic-core = "~2" +pydantic = "^2.6.2" # bug in post 2.3 https://github.com/pydantic/pydantic/issues/7720 +pydantic-core = "^2.16.3" numpy = "^1.26.2" # fastapi="^0.108.0" diff --git a/stac_model/__main__.py b/stac_model/__main__.py index 5034250..6eeb4bd 100644 --- a/stac_model/__main__.py +++ b/stac_model/__main__.py @@ -2,17 +2,7 @@ from rich.console import Console from stac_model import __version__ -from stac_model.schema import ( - Asset, - ClassObject, - InputArray, - MLModel, - ModelInput, - ModelOutput, - ResultArray, - Runtime, - Statistics, -) +from stac_model.examples import eurosat_resnet app = typer.Typer( name="stac-model", @@ -24,14 +14,12 @@ ) console = Console() - def version_callback(print_version: bool) -> None: """Print the version of the package.""" if print_version: console.print(f"[yellow]stac-model[/] version: [bold blue]{__version__}[/]") raise typer.Exit() - @app.command(name="") def main( print_version: bool = typer.Option( @@ -44,128 +32,13 @@ def main( ), ) -> None: """Generate example spec.""" - - input_array = InputArray( - shape=[-1, 13, 64, 64], dim_order="bchw", data_type="float32" - ) - band_names = [ - "B01", - "B02", - "B03", - "B04", - "B05", - "B06", - "B07", - "B08", - "B8A", - "B09", - "B10", - "B11", - "B12", - ] - stats = Statistics( - mean=[ - 1354.40546513, - 1118.24399958, - 1042.92983953, - 947.62620298, - 1199.47283961, - 1999.79090914, - 2369.22292565, - 2296.82608323, - 732.08340178, - 12.11327804, - 1819.01027855, - 1118.92391149, - 2594.14080798, - ], - stddev=[ - 245.71762908, - 333.00778264, - 395.09249139, - 593.75055589, - 566.4170017, - 861.18399006, - 1086.63139075, - 1117.98170791, - 404.91978886, - 4.77584468, - 1002.58768311, - 761.30323499, - 1231.58581042, - ], - ) - mlm_input = ModelInput( - name="13 Band Sentinel-2 Batch", - bands=band_names, - input_array=input_array, - norm_by_channel=True, - norm_type="z_score", - rescale_type="none", - 
statistics=stats, - pre_processing_function = "https://github.com/microsoft/torchgeo/blob/545abe8326efc2848feae69d0212a15faba3eb00/torchgeo/datamodules/eurosat.py" # noqa: E501 -, - ) - mlm_runtime = Runtime( - framework="torch", - version="2.1.2+cu121", - asset=Asset( - href="https://huggingface.co/torchgeo/resnet18_sentinel2_all_moco/resolve/main/resnet18_sentinel2_all_moco-59bfdff9.pth" # noqa: E501 - ), - source_code=Asset( - href="https://github.com/microsoft/torchgeo/blob/61efd2e2c4df7ebe3bd03002ebbaeaa3cfe9885a/torchgeo/models/resnet.py#L207" # noqa: E501 - ), - accelerator="cuda", - accelerator_constrained=False, - hardware_summary="Unknown", - ) - result_array = ResultArray( - shape=[-1, 10], dim_names=["batch", "class"], data_type="float32" - ) - class_map = { - "Annual Crop": 0, - "Forest": 1, - "Herbaceous Vegetation": 2, - "Highway": 3, - "Industrial Buildings": 4, - "Pasture": 5, - "Permanent Crop": 6, - "Residential Buildings": 7, - "River": 8, - "SeaLake": 9, - } - class_objects = [ - ClassObject(value=class_map[class_name], name=class_name) - for class_name in class_map - ] - mlm_output = ModelOutput( - task="classification", - classification_classes=class_objects, - output_shape=[-1, 10], - result_array=[result_array], - ) - ml_model_meta = MLModel( - mlm_name="Resnet-18 Sentinel-2 ALL MOCO", - mlm_task="classification", - mlm_framework="pytorch", - mlm_framework_version="2.1.2+cu121", - mlm_file_size=1, - mlm_memory_size=1, - mlm_summary=( - "Sourced from torchgeo python library," - "identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO" - ), - mlm_pretrained_source="EuroSat Sentinel-2", - mlm_total_parameters=11_700_000, - mlm_input=[mlm_input], - mlm_runtime=[mlm_runtime], - mlm_output=[mlm_output], - ) + ml_model_meta = eurosat_resnet() json_str = ml_model_meta.model_dump_json(indent=2, exclude_none=True, by_alias=True) with open("example.json", "w") as file: file.write(json_str) print(ml_model_meta.model_dump_json(indent=2, exclude_none=True, 
by_alias=True)) - + print("Example model metadata written to ./example.json.") + return ml_model_meta if __name__ == "__main__": app() diff --git a/stac_model/examples.py b/stac_model/examples.py new file mode 100644 index 0000000..5a7d980 --- /dev/null +++ b/stac_model/examples.py @@ -0,0 +1,132 @@ +from stac_model.schema import ( + Asset, + ClassObject, + InputArray, + MLModel, + ModelInput, + ModelOutput, + ResultArray, + Runtime, + Statistics, +) + + +def eurosat_resnet(): + + input_array = InputArray( + shape=[-1, 13, 64, 64], dim_order="bchw", data_type="float32" + ) + band_names = [ + "B01", + "B02", + "B03", + "B04", + "B05", + "B06", + "B07", + "B08", + "B8A", + "B09", + "B10", + "B11", + "B12", + ] + stats = Statistics( + mean=[ + 1354.40546513, + 1118.24399958, + 1042.92983953, + 947.62620298, + 1199.47283961, + 1999.79090914, + 2369.22292565, + 2296.82608323, + 732.08340178, + 12.11327804, + 1819.01027855, + 1118.92391149, + 2594.14080798, + ], + stddev=[ + 245.71762908, + 333.00778264, + 395.09249139, + 593.75055589, + 566.4170017, + 861.18399006, + 1086.63139075, + 1117.98170791, + 404.91978886, + 4.77584468, + 1002.58768311, + 761.30323499, + 1231.58581042, + ], + ) + mlm_input = ModelInput( + name="13 Band Sentinel-2 Batch", + bands=band_names, + input_array=input_array, + norm_by_channel=True, + norm_type="z_score", + rescale_type="none", + statistics=stats, + pre_processing_function = "https://github.com/microsoft/torchgeo/blob/545abe8326efc2848feae69d0212a15faba3eb00/torchgeo/datamodules/eurosat.py" # noqa: E501 +, + ) + mlm_runtime = Runtime( + framework="torch", + version="2.1.2+cu121", + asset=Asset( + href="https://huggingface.co/torchgeo/resnet18_sentinel2_all_moco/resolve/main/resnet18_sentinel2_all_moco-59bfdff9.pth" # noqa: E501 + ), + source_code=Asset( + href="https://github.com/microsoft/torchgeo/blob/61efd2e2c4df7ebe3bd03002ebbaeaa3cfe9885a/torchgeo/models/resnet.py#L207" # noqa: E501 + ), + accelerator="cuda", + 
accelerator_constrained=False, + hardware_summary="Unknown", + ) + result_array = ResultArray( + shape=[-1, 10], dim_names=["batch", "class"], data_type="float32" + ) + class_map = { + "Annual Crop": 0, + "Forest": 1, + "Herbaceous Vegetation": 2, + "Highway": 3, + "Industrial Buildings": 4, + "Pasture": 5, + "Permanent Crop": 6, + "Residential Buildings": 7, + "River": 8, + "SeaLake": 9, + } + class_objects = [ + ClassObject(value=class_map[class_name], name=class_name) + for class_name in class_map + ] + mlm_output = ModelOutput( + task="classification", + classification_classes=class_objects, + output_shape=[-1, 10], + result_array=[result_array], + ) + ml_model_meta = MLModel( + mlm_name="Resnet-18 Sentinel-2 ALL MOCO", + mlm_task="classification", + mlm_framework="pytorch", + mlm_framework_version="2.1.2+cu121", + mlm_file_size=1, + mlm_memory_size=1, + mlm_summary=( + "Sourced from torchgeo python library," + "identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO" + ), + mlm_pretrained_source="EuroSat Sentinel-2", + mlm_total_parameters=11_700_000, + mlm_input=[mlm_input], + mlm_runtime=[mlm_runtime], + mlm_output=[mlm_output], + ) + return ml_model_meta diff --git a/stac_model/runtime.py b/stac_model/runtime.py index 11ac146..2872abf 100644 --- a/stac_model/runtime.py +++ b/stac_model/runtime.py @@ -1,7 +1,7 @@ from enum import Enum from typing import List, Optional -from pydantic import BaseModel, FilePath, field_validator +from pydantic import BaseModel, ConfigDict, FilePath, field_validator from .paths import S3Path @@ -17,8 +17,7 @@ class Asset(BaseModel): type: Optional[str] = None roles: Optional[List[str]] = None - class Config: - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed = True) @field_validator("href") @classmethod diff --git a/tests/test_schema.py b/tests/test_schema.py index 4987c98..4e2d387 100644 --- a/tests/test_schema.py +++ b/tests/test_schema.py @@ -3,68 +3,20 @@ import pytest -from stac_model.schema 
import ( - ClassMap, - ModelArtifact, - ModelMetadata, - ModelSignature, - TensorSignature, -) - - -def create_metadata(): - input_sig = TensorSignature( - name="input_tensor", dtype="float32", shape=(-1, 13, 64, 64) - ) - output_sig = TensorSignature(name="output_tensor", dtype="float32", shape=(-1, 10)) - model_sig = ModelSignature(inputs=[input_sig], outputs=[output_sig]) - model_artifact = ModelArtifact(path="s3://example/s3/uri/model.pt") - class_map = ClassMap( - class_to_label_id={ - "Annual Crop": 0, - "Forest": 1, - "Herbaceous Vegetation": 2, - "Highway": 3, - "Industrial Buildings": 4, - "Pasture": 5, - "Permanent Crop": 6, - "Residential Buildings": 7, - "River": 8, - "SeaLake": 9, - } - ) - return ModelMetadata( - name="eurosat", - class_map=class_map, - signatures=model_sig, - artifact=model_artifact, - ml_model_processor_type="cpu", - ) - @pytest.fixture def metadata_json(): - model_metadata = create_metadata() + from stac_model.examples import eurosat_resnet + model_metadata = eurosat_resnet() return model_metadata.model_dump_json(indent=2) - def test_model_metadata_json_operations(metadata_json): - # Use a temporary directory + from stac_model.schema import MLModel with tempfile.TemporaryDirectory() as temp_dir: temp_filepath = os.path.join(temp_dir, "tempfile.json") - - # Write to the file with open(temp_filepath, "w") as file: file.write(metadata_json) - - # Read and validate the model metadata from the JSON file with open(temp_filepath) as json_file: json_str = json_file.read() - model_metadata = ModelMetadata.model_validate_json(json_str) - - assert model_metadata.name == "eurosat" - - -def test_benchmark_model_metadata_validation(benchmark): - json_str = create_metadata().model_dump_json(indent=2) - benchmark(ModelMetadata.model_validate_json, json_str) + model_metadata = MLModel.model_validate_json(json_str) + assert model_metadata.name == "Resnet-18 Sentinel-2 ALL MOCO" From 41cf8eaa768f41e988ddd693688fc9d795d58cf6 Mon Sep 17 00:00:00 2001 
From: Ryan Avery Date: Sun, 25 Feb 2024 20:01:10 -0800 Subject: [PATCH 044/112] simplify test --- tests/test_schema.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/tests/test_schema.py b/tests/test_schema.py index 4e2d387..8259cb0 100644 --- a/tests/test_schema.py +++ b/tests/test_schema.py @@ -1,5 +1,3 @@ -import os -import tempfile import pytest @@ -12,11 +10,5 @@ def metadata_json(): def test_model_metadata_json_operations(metadata_json): from stac_model.schema import MLModel - with tempfile.TemporaryDirectory() as temp_dir: - temp_filepath = os.path.join(temp_dir, "tempfile.json") - with open(temp_filepath, "w") as file: - file.write(metadata_json) - with open(temp_filepath) as json_file: - json_str = json_file.read() - model_metadata = MLModel.model_validate_json(json_str) + model_metadata = MLModel.model_validate_json(metadata_json) assert model_metadata.name == "Resnet-18 Sentinel-2 ALL MOCO" From 21d1aa9947befc8fb2a46458ef84e9d183d50f48 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Sun, 25 Feb 2024 20:07:54 -0800 Subject: [PATCH 045/112] increment stac model version --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 78ec295..89c193a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "stac-model" -version = "0.1.1.alpha2" +version = "0.1.1.alpha3" description = "A PydanticV2 validation and serialization libary for the STAC ML Model Extension" readme = "README.md" authors = ["Ryan Avery "] From 2e079016c1cd099487c2adba2335355f2bc64d91 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Mon, 26 Feb 2024 10:52:33 -0800 Subject: [PATCH 046/112] optional annotations, downgrade pydantic --- poetry.lock | 201 +++++++++++++++++++++++------------------ pyproject.toml | 6 +- stac_model/examples.py | 2 +- stac_model/input.py | 5 +- stac_model/output.py | 12 +-- stac_model/schema.py | 4 +- 
tests/test_schema.py | 2 +- 7 files changed, 130 insertions(+), 102 deletions(-) diff --git a/poetry.lock b/poetry.lock index 1cd5020..3e3b246 100644 --- a/poetry.lock +++ b/poetry.lock @@ -598,18 +598,18 @@ files = [ [[package]] name = "pydantic" -version = "2.6.2" +version = "2.3.0" description = "Data validation using Python type hints" optional = false -python-versions = ">=3.8" +python-versions = ">=3.7" files = [ - {file = "pydantic-2.6.2-py3-none-any.whl", hash = "sha256:37a5432e54b12fecaa1049c5195f3d860a10e01bdfd24f1840ef14bd0d3aeab3"}, - {file = "pydantic-2.6.2.tar.gz", hash = "sha256:a09be1c3d28f3abe37f8a78af58284b236a92ce520105ddc91a6d29ea1176ba7"}, + {file = "pydantic-2.3.0-py3-none-any.whl", hash = "sha256:45b5e446c6dfaad9444819a293b921a40e1db1aa61ea08aede0522529ce90e81"}, + {file = "pydantic-2.3.0.tar.gz", hash = "sha256:1607cc106602284cd4a00882986570472f193fde9cb1259bceeaedb26aa79a6d"}, ] [package.dependencies] annotated-types = ">=0.4.0" -pydantic-core = "2.16.3" +pydantic-core = "2.6.3" typing-extensions = ">=4.6.1" [package.extras] @@ -617,90 +617,117 @@ email = ["email-validator (>=2.0.0)"] [[package]] name = "pydantic-core" -version = "2.16.3" +version = "2.6.3" description = "" optional = false -python-versions = ">=3.8" +python-versions = ">=3.7" files = [ - {file = "pydantic_core-2.16.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:75b81e678d1c1ede0785c7f46690621e4c6e63ccd9192af1f0bd9d504bbb6bf4"}, - {file = "pydantic_core-2.16.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9c865a7ee6f93783bd5d781af5a4c43dadc37053a5b42f7d18dc019f8c9d2bd1"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:162e498303d2b1c036b957a1278fa0899d02b2842f1ff901b6395104c5554a45"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2f583bd01bbfbff4eaee0868e6fc607efdfcc2b03c1c766b06a707abbc856187"}, - {file = 
"pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b926dd38db1519ed3043a4de50214e0d600d404099c3392f098a7f9d75029ff8"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:716b542728d4c742353448765aa7cdaa519a7b82f9564130e2b3f6766018c9ec"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc4ad7f7ee1a13d9cb49d8198cd7d7e3aa93e425f371a68235f784e99741561f"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bd87f48924f360e5d1c5f770d6155ce0e7d83f7b4e10c2f9ec001c73cf475c99"}, - {file = "pydantic_core-2.16.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0df446663464884297c793874573549229f9eca73b59360878f382a0fc085979"}, - {file = "pydantic_core-2.16.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4df8a199d9f6afc5ae9a65f8f95ee52cae389a8c6b20163762bde0426275b7db"}, - {file = "pydantic_core-2.16.3-cp310-none-win32.whl", hash = "sha256:456855f57b413f077dff513a5a28ed838dbbb15082ba00f80750377eed23d132"}, - {file = "pydantic_core-2.16.3-cp310-none-win_amd64.whl", hash = "sha256:732da3243e1b8d3eab8c6ae23ae6a58548849d2e4a4e03a1924c8ddf71a387cb"}, - {file = "pydantic_core-2.16.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:519ae0312616026bf4cedc0fe459e982734f3ca82ee8c7246c19b650b60a5ee4"}, - {file = "pydantic_core-2.16.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b3992a322a5617ded0a9f23fd06dbc1e4bd7cf39bc4ccf344b10f80af58beacd"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d62da299c6ecb04df729e4b5c52dc0d53f4f8430b4492b93aa8de1f541c4aac"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2acca2be4bb2f2147ada8cac612f8a98fc09f41c89f87add7256ad27332c2fda"}, - {file = 
"pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1b662180108c55dfbf1280d865b2d116633d436cfc0bba82323554873967b340"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e7c6ed0dc9d8e65f24f5824291550139fe6f37fac03788d4580da0d33bc00c97"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6b1bb0827f56654b4437955555dc3aeeebeddc47c2d7ed575477f082622c49e"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e56f8186d6210ac7ece503193ec84104da7ceb98f68ce18c07282fcc2452e76f"}, - {file = "pydantic_core-2.16.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:936e5db01dd49476fa8f4383c259b8b1303d5dd5fb34c97de194560698cc2c5e"}, - {file = "pydantic_core-2.16.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:33809aebac276089b78db106ee692bdc9044710e26f24a9a2eaa35a0f9fa70ba"}, - {file = "pydantic_core-2.16.3-cp311-none-win32.whl", hash = "sha256:ded1c35f15c9dea16ead9bffcde9bb5c7c031bff076355dc58dcb1cb436c4721"}, - {file = "pydantic_core-2.16.3-cp311-none-win_amd64.whl", hash = "sha256:d89ca19cdd0dd5f31606a9329e309d4fcbb3df860960acec32630297d61820df"}, - {file = "pydantic_core-2.16.3-cp311-none-win_arm64.whl", hash = "sha256:6162f8d2dc27ba21027f261e4fa26f8bcb3cf9784b7f9499466a311ac284b5b9"}, - {file = "pydantic_core-2.16.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:0f56ae86b60ea987ae8bcd6654a887238fd53d1384f9b222ac457070b7ac4cff"}, - {file = "pydantic_core-2.16.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c9bd22a2a639e26171068f8ebb5400ce2c1bc7d17959f60a3b753ae13c632975"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4204e773b4b408062960e65468d5346bdfe139247ee5f1ca2a378983e11388a2"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:f651dd19363c632f4abe3480a7c87a9773be27cfe1341aef06e8759599454120"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aaf09e615a0bf98d406657e0008e4a8701b11481840be7d31755dc9f97c44053"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8e47755d8152c1ab5b55928ab422a76e2e7b22b5ed8e90a7d584268dd49e9c6b"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:500960cb3a0543a724a81ba859da816e8cf01b0e6aaeedf2c3775d12ee49cade"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cf6204fe865da605285c34cf1172879d0314ff267b1c35ff59de7154f35fdc2e"}, - {file = "pydantic_core-2.16.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d33dd21f572545649f90c38c227cc8631268ba25c460b5569abebdd0ec5974ca"}, - {file = "pydantic_core-2.16.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:49d5d58abd4b83fb8ce763be7794d09b2f50f10aa65c0f0c1696c677edeb7cbf"}, - {file = "pydantic_core-2.16.3-cp312-none-win32.whl", hash = "sha256:f53aace168a2a10582e570b7736cc5bef12cae9cf21775e3eafac597e8551fbe"}, - {file = "pydantic_core-2.16.3-cp312-none-win_amd64.whl", hash = "sha256:0d32576b1de5a30d9a97f300cc6a3f4694c428d956adbc7e6e2f9cad279e45ed"}, - {file = "pydantic_core-2.16.3-cp312-none-win_arm64.whl", hash = "sha256:ec08be75bb268473677edb83ba71e7e74b43c008e4a7b1907c6d57e940bf34b6"}, - {file = "pydantic_core-2.16.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:b1f6f5938d63c6139860f044e2538baeee6f0b251a1816e7adb6cbce106a1f01"}, - {file = "pydantic_core-2.16.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2a1ef6a36fdbf71538142ed604ad19b82f67b05749512e47f247a6ddd06afdc7"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:704d35ecc7e9c31d48926150afada60401c55efa3b46cd1ded5a01bdffaf1d48"}, - {file = 
"pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d937653a696465677ed583124b94a4b2d79f5e30b2c46115a68e482c6a591c8a"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c9803edf8e29bd825f43481f19c37f50d2b01899448273b3a7758441b512acf8"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:72282ad4892a9fb2da25defeac8c2e84352c108705c972db82ab121d15f14e6d"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f752826b5b8361193df55afcdf8ca6a57d0232653494ba473630a83ba50d8c9"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4384a8f68ddb31a0b0c3deae88765f5868a1b9148939c3f4121233314ad5532c"}, - {file = "pydantic_core-2.16.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a4b2bf78342c40b3dc830880106f54328928ff03e357935ad26c7128bbd66ce8"}, - {file = "pydantic_core-2.16.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:13dcc4802961b5f843a9385fc821a0b0135e8c07fc3d9949fd49627c1a5e6ae5"}, - {file = "pydantic_core-2.16.3-cp38-none-win32.whl", hash = "sha256:e3e70c94a0c3841e6aa831edab1619ad5c511199be94d0c11ba75fe06efe107a"}, - {file = "pydantic_core-2.16.3-cp38-none-win_amd64.whl", hash = "sha256:ecdf6bf5f578615f2e985a5e1f6572e23aa632c4bd1dc67f8f406d445ac115ed"}, - {file = "pydantic_core-2.16.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:bda1ee3e08252b8d41fa5537413ffdddd58fa73107171a126d3b9ff001b9b820"}, - {file = "pydantic_core-2.16.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:21b888c973e4f26b7a96491c0965a8a312e13be108022ee510248fe379a5fa23"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be0ec334369316fa73448cc8c982c01e5d2a81c95969d58b8f6e272884df0074"}, - {file = 
"pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b5b6079cc452a7c53dd378c6f881ac528246b3ac9aae0f8eef98498a75657805"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ee8d5f878dccb6d499ba4d30d757111847b6849ae07acdd1205fffa1fc1253c"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7233d65d9d651242a68801159763d09e9ec96e8a158dbf118dc090cd77a104c9"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c6119dc90483a5cb50a1306adb8d52c66e447da88ea44f323e0ae1a5fcb14256"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:578114bc803a4c1ff9946d977c221e4376620a46cf78da267d946397dc9514a8"}, - {file = "pydantic_core-2.16.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d8f99b147ff3fcf6b3cc60cb0c39ea443884d5559a30b1481e92495f2310ff2b"}, - {file = "pydantic_core-2.16.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4ac6b4ce1e7283d715c4b729d8f9dab9627586dafce81d9eaa009dd7f25dd972"}, - {file = "pydantic_core-2.16.3-cp39-none-win32.whl", hash = "sha256:e7774b570e61cb998490c5235740d475413a1f6de823169b4cf94e2fe9e9f6b2"}, - {file = "pydantic_core-2.16.3-cp39-none-win_amd64.whl", hash = "sha256:9091632a25b8b87b9a605ec0e61f241c456e9248bfdcf7abdf344fdb169c81cf"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:36fa178aacbc277bc6b62a2c3da95226520da4f4e9e206fdf076484363895d2c"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:dcca5d2bf65c6fb591fff92da03f94cd4f315972f97c21975398bd4bd046854a"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a72fb9963cba4cd5793854fd12f4cfee731e86df140f59ff52a49b3552db241"}, - {file = 
"pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b60cc1a081f80a2105a59385b92d82278b15d80ebb3adb200542ae165cd7d183"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cbcc558401de90a746d02ef330c528f2e668c83350f045833543cd57ecead1ad"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:fee427241c2d9fb7192b658190f9f5fd6dfe41e02f3c1489d2ec1e6a5ab1e04a"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f4cb85f693044e0f71f394ff76c98ddc1bc0953e48c061725e540396d5c8a2e1"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b29eeb887aa931c2fcef5aa515d9d176d25006794610c264ddc114c053bf96fe"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a425479ee40ff021f8216c9d07a6a3b54b31c8267c6e17aa88b70d7ebd0e5e5b"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:5c5cbc703168d1b7a838668998308018a2718c2130595e8e190220238addc96f"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99b6add4c0b39a513d323d3b93bc173dac663c27b99860dd5bf491b240d26137"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75f76ee558751746d6a38f89d60b6228fa174e5172d143886af0f85aa306fd89"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:00ee1c97b5364b84cb0bd82e9bbf645d5e2871fb8c58059d158412fee2d33d8a"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:287073c66748f624be4cef893ef9174e3eb88fe0b8a78dc22e88eca4bc357ca6"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ed25e1835c00a332cb10c683cd39da96a719ab1dfc08427d476bce41b92531fc"}, - {file 
= "pydantic_core-2.16.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:86b3d0033580bd6bbe07590152007275bd7af95f98eaa5bd36f3da219dcd93da"}, - {file = "pydantic_core-2.16.3.tar.gz", hash = "sha256:1cac689f80a3abab2d3c0048b29eea5751114054f032a941a32de4c852c59cad"}, + {file = "pydantic_core-2.6.3-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:1a0ddaa723c48af27d19f27f1c73bdc615c73686d763388c8683fe34ae777bad"}, + {file = "pydantic_core-2.6.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5cfde4fab34dd1e3a3f7f3db38182ab6c95e4ea91cf322242ee0be5c2f7e3d2f"}, + {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5493a7027bfc6b108e17c3383959485087d5942e87eb62bbac69829eae9bc1f7"}, + {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:84e87c16f582f5c753b7f39a71bd6647255512191be2d2dbf49458c4ef024588"}, + {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:522a9c4a4d1924facce7270c84b5134c5cabcb01513213662a2e89cf28c1d309"}, + {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aaafc776e5edc72b3cad1ccedb5fd869cc5c9a591f1213aa9eba31a781be9ac1"}, + {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a750a83b2728299ca12e003d73d1264ad0440f60f4fc9cee54acc489249b728"}, + {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9e8b374ef41ad5c461efb7a140ce4730661aadf85958b5c6a3e9cf4e040ff4bb"}, + {file = "pydantic_core-2.6.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b594b64e8568cf09ee5c9501ede37066b9fc41d83d58f55b9952e32141256acd"}, + {file = "pydantic_core-2.6.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2a20c533cb80466c1d42a43a4521669ccad7cf2967830ac62c2c2f9cece63e7e"}, + {file = "pydantic_core-2.6.3-cp310-none-win32.whl", hash = 
"sha256:04fe5c0a43dec39aedba0ec9579001061d4653a9b53a1366b113aca4a3c05ca7"}, + {file = "pydantic_core-2.6.3-cp310-none-win_amd64.whl", hash = "sha256:6bf7d610ac8f0065a286002a23bcce241ea8248c71988bda538edcc90e0c39ad"}, + {file = "pydantic_core-2.6.3-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:6bcc1ad776fffe25ea5c187a028991c031a00ff92d012ca1cc4714087e575973"}, + {file = "pydantic_core-2.6.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:df14f6332834444b4a37685810216cc8fe1fe91f447332cd56294c984ecbff1c"}, + {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0b7486d85293f7f0bbc39b34e1d8aa26210b450bbd3d245ec3d732864009819"}, + {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a892b5b1871b301ce20d40b037ffbe33d1407a39639c2b05356acfef5536d26a"}, + {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:883daa467865e5766931e07eb20f3e8152324f0adf52658f4d302242c12e2c32"}, + {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d4eb77df2964b64ba190eee00b2312a1fd7a862af8918ec70fc2d6308f76ac64"}, + {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ce8c84051fa292a5dc54018a40e2a1926fd17980a9422c973e3ebea017aa8da"}, + {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:22134a4453bd59b7d1e895c455fe277af9d9d9fbbcb9dc3f4a97b8693e7e2c9b"}, + {file = "pydantic_core-2.6.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:02e1c385095efbd997311d85c6021d32369675c09bcbfff3b69d84e59dc103f6"}, + {file = "pydantic_core-2.6.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d79f1f2f7ebdb9b741296b69049ff44aedd95976bfee38eb4848820628a99b50"}, + {file = "pydantic_core-2.6.3-cp311-none-win32.whl", hash = 
"sha256:430ddd965ffd068dd70ef4e4d74f2c489c3a313adc28e829dd7262cc0d2dd1e8"}, + {file = "pydantic_core-2.6.3-cp311-none-win_amd64.whl", hash = "sha256:84f8bb34fe76c68c9d96b77c60cef093f5e660ef8e43a6cbfcd991017d375950"}, + {file = "pydantic_core-2.6.3-cp311-none-win_arm64.whl", hash = "sha256:5a2a3c9ef904dcdadb550eedf3291ec3f229431b0084666e2c2aa8ff99a103a2"}, + {file = "pydantic_core-2.6.3-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:8421cf496e746cf8d6b677502ed9a0d1e4e956586cd8b221e1312e0841c002d5"}, + {file = "pydantic_core-2.6.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bb128c30cf1df0ab78166ded1ecf876620fb9aac84d2413e8ea1594b588c735d"}, + {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37a822f630712817b6ecc09ccc378192ef5ff12e2c9bae97eb5968a6cdf3b862"}, + {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:240a015102a0c0cc8114f1cba6444499a8a4d0333e178bc504a5c2196defd456"}, + {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f90e5e3afb11268628c89f378f7a1ea3f2fe502a28af4192e30a6cdea1e7d5e"}, + {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:340e96c08de1069f3d022a85c2a8c63529fd88709468373b418f4cf2c949fb0e"}, + {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1480fa4682e8202b560dcdc9eeec1005f62a15742b813c88cdc01d44e85308e5"}, + {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f14546403c2a1d11a130b537dda28f07eb6c1805a43dae4617448074fd49c282"}, + {file = "pydantic_core-2.6.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a87c54e72aa2ef30189dc74427421e074ab4561cf2bf314589f6af5b37f45e6d"}, + {file = "pydantic_core-2.6.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:f93255b3e4d64785554e544c1c76cd32f4a354fa79e2eeca5d16ac2e7fdd57aa"}, + {file = "pydantic_core-2.6.3-cp312-none-win32.whl", hash = "sha256:f70dc00a91311a1aea124e5f64569ea44c011b58433981313202c46bccbec0e1"}, + {file = "pydantic_core-2.6.3-cp312-none-win_amd64.whl", hash = "sha256:23470a23614c701b37252618e7851e595060a96a23016f9a084f3f92f5ed5881"}, + {file = "pydantic_core-2.6.3-cp312-none-win_arm64.whl", hash = "sha256:1ac1750df1b4339b543531ce793b8fd5c16660a95d13aecaab26b44ce11775e9"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:a53e3195f134bde03620d87a7e2b2f2046e0e5a8195e66d0f244d6d5b2f6d31b"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:f2969e8f72c6236c51f91fbb79c33821d12a811e2a94b7aa59c65f8dbdfad34a"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:672174480a85386dd2e681cadd7d951471ad0bb028ed744c895f11f9d51b9ebe"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:002d0ea50e17ed982c2d65b480bd975fc41086a5a2f9c924ef8fc54419d1dea3"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3ccc13afee44b9006a73d2046068d4df96dc5b333bf3509d9a06d1b42db6d8bf"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:439a0de139556745ae53f9cc9668c6c2053444af940d3ef3ecad95b079bc9987"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d63b7545d489422d417a0cae6f9898618669608750fc5e62156957e609e728a5"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b44c42edc07a50a081672e25dfe6022554b47f91e793066a7b601ca290f71e42"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1c721bfc575d57305dd922e6a40a8fe3f762905851d694245807a351ad255c58"}, + {file = 
"pydantic_core-2.6.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:5e4a2cf8c4543f37f5dc881de6c190de08096c53986381daebb56a355be5dfe6"}, + {file = "pydantic_core-2.6.3-cp37-none-win32.whl", hash = "sha256:d9b4916b21931b08096efed090327f8fe78e09ae8f5ad44e07f5c72a7eedb51b"}, + {file = "pydantic_core-2.6.3-cp37-none-win_amd64.whl", hash = "sha256:a8acc9dedd304da161eb071cc7ff1326aa5b66aadec9622b2574ad3ffe225525"}, + {file = "pydantic_core-2.6.3-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:5e9c068f36b9f396399d43bfb6defd4cc99c36215f6ff33ac8b9c14ba15bdf6b"}, + {file = "pydantic_core-2.6.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e61eae9b31799c32c5f9b7be906be3380e699e74b2db26c227c50a5fc7988698"}, + {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d85463560c67fc65cd86153a4975d0b720b6d7725cf7ee0b2d291288433fc21b"}, + {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9616567800bdc83ce136e5847d41008a1d602213d024207b0ff6cab6753fe645"}, + {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9e9b65a55bbabda7fccd3500192a79f6e474d8d36e78d1685496aad5f9dbd92c"}, + {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f468d520f47807d1eb5d27648393519655eadc578d5dd862d06873cce04c4d1b"}, + {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9680dd23055dd874173a3a63a44e7f5a13885a4cfd7e84814be71be24fba83db"}, + {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a718d56c4d55efcfc63f680f207c9f19c8376e5a8a67773535e6f7e80e93170"}, + {file = "pydantic_core-2.6.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8ecbac050856eb6c3046dea655b39216597e373aa8e50e134c0e202f9c47efec"}, + {file = "pydantic_core-2.6.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = 
"sha256:788be9844a6e5c4612b74512a76b2153f1877cd845410d756841f6c3420230eb"}, + {file = "pydantic_core-2.6.3-cp38-none-win32.whl", hash = "sha256:07a1aec07333bf5adebd8264047d3dc518563d92aca6f2f5b36f505132399efc"}, + {file = "pydantic_core-2.6.3-cp38-none-win_amd64.whl", hash = "sha256:621afe25cc2b3c4ba05fff53525156d5100eb35c6e5a7cf31d66cc9e1963e378"}, + {file = "pydantic_core-2.6.3-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:813aab5bfb19c98ae370952b6f7190f1e28e565909bfc219a0909db168783465"}, + {file = "pydantic_core-2.6.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:50555ba3cb58f9861b7a48c493636b996a617db1a72c18da4d7f16d7b1b9952b"}, + {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19e20f8baedd7d987bd3f8005c146e6bcbda7cdeefc36fad50c66adb2dd2da48"}, + {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b0a5d7edb76c1c57b95df719af703e796fc8e796447a1da939f97bfa8a918d60"}, + {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f06e21ad0b504658a3a9edd3d8530e8cea5723f6ea5d280e8db8efc625b47e49"}, + {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ea053cefa008fda40f92aab937fb9f183cf8752e41dbc7bc68917884454c6362"}, + {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:171a4718860790f66d6c2eda1d95dd1edf64f864d2e9f9115840840cf5b5713f"}, + {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5ed7ceca6aba5331ece96c0e328cd52f0dcf942b8895a1ed2642de50800b79d3"}, + {file = "pydantic_core-2.6.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:acafc4368b289a9f291e204d2c4c75908557d4f36bd3ae937914d4529bf62a76"}, + {file = "pydantic_core-2.6.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1aa712ba150d5105814e53cb141412217146fedc22621e9acff9236d77d2a5ef"}, + {file = 
"pydantic_core-2.6.3-cp39-none-win32.whl", hash = "sha256:44b4f937b992394a2e81a5c5ce716f3dcc1237281e81b80c748b2da6dd5cf29a"}, + {file = "pydantic_core-2.6.3-cp39-none-win_amd64.whl", hash = "sha256:9b33bf9658cb29ac1a517c11e865112316d09687d767d7a0e4a63d5c640d1b17"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:d7050899026e708fb185e174c63ebc2c4ee7a0c17b0a96ebc50e1f76a231c057"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:99faba727727b2e59129c59542284efebbddade4f0ae6a29c8b8d3e1f437beb7"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fa159b902d22b283b680ef52b532b29554ea2a7fc39bf354064751369e9dbd7"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:046af9cfb5384f3684eeb3f58a48698ddab8dd870b4b3f67f825353a14441418"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:930bfe73e665ebce3f0da2c6d64455098aaa67e1a00323c74dc752627879fc67"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:85cc4d105747d2aa3c5cf3e37dac50141bff779545ba59a095f4a96b0a460e70"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b25afe9d5c4f60dcbbe2b277a79be114e2e65a16598db8abee2a2dcde24f162b"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:e49ce7dc9f925e1fb010fc3d555250139df61fa6e5a0a95ce356329602c11ea9"}, + {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-macosx_10_7_x86_64.whl", hash = "sha256:2dd50d6a1aef0426a1d0199190c6c43ec89812b1f409e7fe44cb0fbf6dfa733c"}, + {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6595b0d8c8711e8e1dc389d52648b923b809f68ac1c6f0baa525c6440aa0daa"}, + {file = 
"pydantic_core-2.6.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ef724a059396751aef71e847178d66ad7fc3fc969a1a40c29f5aac1aa5f8784"}, + {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3c8945a105f1589ce8a693753b908815e0748f6279959a4530f6742e1994dcb6"}, + {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:c8c6660089a25d45333cb9db56bb9e347241a6d7509838dbbd1931d0e19dbc7f"}, + {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:692b4ff5c4e828a38716cfa92667661a39886e71136c97b7dac26edef18767f7"}, + {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:f1a5d8f18877474c80b7711d870db0eeef9442691fcdb00adabfc97e183ee0b0"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:3796a6152c545339d3b1652183e786df648ecdf7c4f9347e1d30e6750907f5bb"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:b962700962f6e7a6bd77e5f37320cabac24b4c0f76afeac05e9f93cf0c620014"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56ea80269077003eaa59723bac1d8bacd2cd15ae30456f2890811efc1e3d4413"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75c0ebbebae71ed1e385f7dfd9b74c1cff09fed24a6df43d326dd7f12339ec34"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:252851b38bad3bfda47b104ffd077d4f9604a10cb06fe09d020016a25107bf98"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:6656a0ae383d8cd7cc94e91de4e526407b3726049ce8d7939049cbfa426518c8"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:d9140ded382a5b04a1c030b593ed9bf3088243a0a8b7fa9f071a5736498c5483"}, + {file = 
"pydantic_core-2.6.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:d38bbcef58220f9c81e42c255ef0bf99735d8f11edef69ab0b499da77105158a"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:c9d469204abcca28926cbc28ce98f28e50e488767b084fb3fbdf21af11d3de26"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:48c1ed8b02ffea4d5c9c220eda27af02b8149fe58526359b3c07eb391cb353a2"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b2b1bfed698fa410ab81982f681f5b1996d3d994ae8073286515ac4d165c2e7"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf9d42a71a4d7a7c1f14f629e5c30eac451a6fc81827d2beefd57d014c006c4a"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4292ca56751aebbe63a84bbfc3b5717abb09b14d4b4442cc43fd7c49a1529efd"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:7dc2ce039c7290b4ef64334ec7e6ca6494de6eecc81e21cb4f73b9b39991408c"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:615a31b1629e12445c0e9fc8339b41aaa6cc60bd53bf802d5fe3d2c0cda2ae8d"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:1fa1f6312fb84e8c281f32b39affe81984ccd484da6e9d65b3d18c202c666149"}, + {file = "pydantic_core-2.6.3.tar.gz", hash = "sha256:1508f37ba9e3ddc0189e6ff4e2228bd2d3c3a4641cbe8c07177162f76ed696c7"}, ] [package.dependencies] @@ -1306,4 +1333,4 @@ test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "4e55abe2ad0d4e1327f95a950f3ca8a4fefb7c8d02a95ac6da7bde435abe8e9a" +content-hash = "8adf56c14896ce1548bf241e7ce24532f82c9a8a580bbde30c444fa5d6d6415d" diff --git a/pyproject.toml b/pyproject.toml index 89c193a..b9bd1ce 100644 --- 
a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "stac-model" -version = "0.1.1.alpha3" +version = "0.1.1.alpha4" description = "A PydanticV2 validation and serialization libary for the STAC ML Model Extension" readme = "README.md" authors = ["Ryan Avery "] @@ -55,8 +55,8 @@ python = "^3.10" typer = {extras = ["all"], version = "^0.9.0"} rich = "^13.7.0" -pydantic = "^2.6.2" # bug in post 2.3 https://github.com/pydantic/pydantic/issues/7720 -pydantic-core = "^2.16.3" +pydantic = "2.3" # bug in post 2.3 https://github.com/pydantic/pydantic/issues/7720 +pydantic-core = "^2" numpy = "^1.26.2" # fastapi="^0.108.0" diff --git a/stac_model/examples.py b/stac_model/examples.py index 5a7d980..79f3a44 100644 --- a/stac_model/examples.py +++ b/stac_model/examples.py @@ -69,7 +69,7 @@ def eurosat_resnet(): input_array=input_array, norm_by_channel=True, norm_type="z_score", - rescale_type="none", + resize_type="none", statistics=stats, pre_processing_function = "https://github.com/microsoft/torchgeo/blob/545abe8326efc2848feae69d0212a15faba3eb00/torchgeo/datamodules/eurosat.py" # noqa: E501 , diff --git a/stac_model/input.py b/stac_model/input.py index baf8df8..4bc0db1 100644 --- a/stac_model/input.py +++ b/stac_model/input.py @@ -33,7 +33,6 @@ class ModelInput(BaseModel): name: str bands: List[str] input_array: InputArray - parameters: Dict[str, Union[int, str, bool, List[Union[int, str, bool]]]] = None norm_by_channel: bool = None norm_type: Literal[ "min_max", @@ -44,7 +43,9 @@ class ModelInput(BaseModel): "norm_with_clip", "none", ] = None - resize_type: Literal["crop", "pad", "interpolation", "none"] = None + resize_type: Literal["crop", "pad", "interpolate", "none"] = None + parameters: Optional[Dict[str, Union[int, str, bool, + List[Union[int, str, bool]]]]] = None statistics: Optional[Union[Statistics, List[Statistics]]] = None norm_with_clip_values: Optional[List[Union[float, int]]] = None 
pre_processing_function: Optional[str | AnyUrl] = None diff --git a/stac_model/output.py b/stac_model/output.py index 01b7d0b..543c26b 100644 --- a/stac_model/output.py +++ b/stac_model/output.py @@ -30,14 +30,14 @@ class ResultArray(BaseModel): class ClassObject(BaseModel): value: int name: str - description: str = None - title: str = None - color_hint: str = None - nodata: bool = False + description: Optional[str] = None + title: Optional[str] = None + color_hint: Optional[str] = None + nodata: Optional[bool] = False class ModelOutput(BaseModel): task: TaskEnum - result_array: List[ResultArray] = None - classification_classes: List[ClassObject] = None + result_array: Optional[List[ResultArray]] = None + classification_classes: Optional[List[ClassObject]] = None post_processing_function: Optional[str] = None diff --git a/stac_model/schema.py b/stac_model/schema.py index 3df0926..84a6f8c 100644 --- a/stac_model/schema.py +++ b/stac_model/schema.py @@ -1,4 +1,4 @@ -from typing import Dict, List, Union +from typing import Dict, List, Optional, Union from pydantic import BaseModel, ConfigDict @@ -23,7 +23,7 @@ class MLModel(BaseModel): mlm_total_parameters: int mlm_pretrained_source: str mlm_summary: str - mlm_parameters: Dict[str, Union[int, str, bool, List[Union[int, str, bool]]]] = None # noqa: E501 + mlm_parameters: Optional[Dict[str, Union[int, str, bool, List[Union[int, str, bool]]]]] = None # noqa: E501 model_config = ConfigDict(alias_generator=mlm_prefix_replacer, populate_by_name=True, extra="ignore") diff --git a/tests/test_schema.py b/tests/test_schema.py index 8259cb0..71359d2 100644 --- a/tests/test_schema.py +++ b/tests/test_schema.py @@ -11,4 +11,4 @@ def metadata_json(): def test_model_metadata_json_operations(metadata_json): from stac_model.schema import MLModel model_metadata = MLModel.model_validate_json(metadata_json) - assert model_metadata.name == "Resnet-18 Sentinel-2 ALL MOCO" + assert model_metadata.mlm_name == "Resnet-18 Sentinel-2 ALL MOCO" 
From 2b62d7b1f2e57d614ffa29828e0cfb5f40ae1b9d Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Tue, 27 Feb 2024 16:31:31 -0800 Subject: [PATCH 047/112] combine stac_model and pystac metadata --- best-practices.md | 4 + poetry.lock | 50 ++++++- pyproject.toml | 3 +- stac_model/examples.py | 4 +- stac_model/geometry_models.py | 39 ++++++ stac_model/schema.py | 238 +++++++++++++++++++++++++++++++++- 6 files changed, 330 insertions(+), 8 deletions(-) create mode 100644 stac_model/geometry_models.py diff --git a/best-practices.md b/best-practices.md index 1d24ff8..8d6023c 100644 --- a/best-practices.md +++ b/best-practices.md @@ -2,6 +2,10 @@ This document makes a number of recommendations for creating real world ML Model Extensions. None of them are required to meet the core specification, but following these practices will improve the documentation of your model and make life easier for client tooling and users. They come about from practical experience of implementors and introduce a bit more 'constraint' for those who are creating STAC objects representing their models or creating tools to work with STAC. +## Using STAC Common Metadata Fields for the ML Model Extension + +We recommend using the `start_datetime` and `end_datetime`, `geometry`, and `bbox` to represent the recommended context of data the model was trained with and for which the model should have appropriate domain knowledge for inference. For example, we can consider a model which is trained on imagery from all over the world and is robust enough to be applied to any time period. In this case, the common metadata to use with the model would include the bbox of "the world" `[-90, -180, 90, 180]` and the start_datetime and end_datetime range could be generic values like `["1900-01-01", null]`. 
+ ## Recommended Extensions to Compose with the ML Model Extension ### Processing Extension diff --git a/poetry.lock b/poetry.lock index 3e3b246..55aef26 100644 --- a/poetry.lock +++ b/poetry.lock @@ -785,6 +785,29 @@ files = [ plugins = ["importlib-metadata"] windows-terminal = ["colorama (>=0.4.6)"] +[[package]] +name = "pystac" +version = "1.9.0" +description = "Python library for working with the SpatioTemporal Asset Catalog (STAC) specification" +optional = false +python-versions = ">=3.9" +files = [ + {file = "pystac-1.9.0-py3-none-any.whl", hash = "sha256:64d5654166290169ad6ad2bc0d5337a1664ede1165635f0b73b327065b801a2f"}, + {file = "pystac-1.9.0.tar.gz", hash = "sha256:c6b5a86e241fca5e9267a7902c26679f208749a107e9015fe6aaf73a9dd40948"}, +] + +[package.dependencies] +python-dateutil = ">=2.7.0" + +[package.extras] +bench = ["asv (>=0.6.0,<0.7.0)", "packaging (>=23.1,<24.0)", "virtualenv (>=20.22,<21.0)"] +docs = ["Sphinx (>=6.2,<7.0)", "boto3 (>=1.28,<2.0)", "ipython (>=8.12,<9.0)", "jinja2 (<4.0)", "jupyter (>=1.0,<2.0)", "nbsphinx (>=0.9.0,<0.10.0)", "pydata-sphinx-theme (>=0.13,<1.0)", "rasterio (>=1.3,<2.0)", "shapely (>=2.0,<3.0)", "sphinx-autobuild (==2021.3.14)", "sphinx-design (>=0.5.0,<0.6.0)", "sphinxcontrib-fulltoc (>=1.2,<2.0)"] +jinja2 = ["jinja2 (<4.0)"] +orjson = ["orjson (>=3.5)"] +test = ["black (>=23.3,<24.0)", "codespell (>=2.2,<3.0)", "coverage (>=7.2,<8.0)", "doc8 (>=1.1,<2.0)", "html5lib (>=1.1,<2.0)", "jinja2 (<4.0)", "jsonschema (>=4.18,<5.0)", "mypy (>=1.2,<2.0)", "orjson (>=3.8,<4.0)", "pre-commit (>=3.2,<4.0)", "pytest (>=7.3,<8.0)", "pytest-cov (>=4.0,<5.0)", "pytest-mock (>=3.10,<4.0)", "pytest-recording (>=0.13.0,<0.14.0)", "requests-mock (>=1.11,<2.0)", "ruff (==0.1.1)", "types-html5lib (>=1.1,<2.0)", "types-jsonschema (>=4.18,<5.0)", "types-orjson (>=3.6,<4.0)", "types-python-dateutil (>=2.8,<3.0)", "types-urllib3 (>=1.26,<2.0)"] +urllib3 = ["urllib3 (>=1.26)"] +validation = ["jsonschema (>=4.18,<5.0)"] + [[package]] name = 
"pytest" version = "7.4.4" @@ -957,6 +980,20 @@ files = [ [package.dependencies] pytest = ">=5.0.0" +[[package]] +name = "python-dateutil" +version = "2.8.2" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, + {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, +] + +[package.dependencies] +six = ">=1.5" + [[package]] name = "pyyaml" version = "6.0.1" @@ -1208,6 +1245,17 @@ files = [ {file = "shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de"}, ] +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + [[package]] name = "snowballstemmer" version = "2.2.0" @@ -1333,4 +1381,4 @@ test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "8adf56c14896ce1548bf241e7ce24532f82c9a8a580bbde30c444fa5d6d6415d" +content-hash = "22fb0b0e7386f5abc1f2f7aa52630ace3c35cbdba9d94e75f0d5a1935f3574e9" diff --git a/pyproject.toml b/pyproject.toml index b9bd1ce..bad04fe 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -57,8 +57,7 @@ typer = {extras = ["all"], version = "^0.9.0"} rich = "^13.7.0" pydantic = "2.3" # bug in post 2.3 https://github.com/pydantic/pydantic/issues/7720 pydantic-core = "^2" -numpy = "^1.26.2" -# fastapi="^0.108.0" +pystac = "^1.9.0" 
[tool.poetry.group.dev.dependencies] diff --git a/stac_model/examples.py b/stac_model/examples.py index 79f3a44..79aaf41 100644 --- a/stac_model/examples.py +++ b/stac_model/examples.py @@ -2,7 +2,7 @@ Asset, ClassObject, InputArray, - MLModel, + MLModelExtension, ModelInput, ModelOutput, ResultArray, @@ -112,7 +112,7 @@ def eurosat_resnet(): output_shape=[-1, 10], result_array=[result_array], ) - ml_model_meta = MLModel( + ml_model_meta = MLModelExtension( mlm_name="Resnet-18 Sentinel-2 ALL MOCO", mlm_task="classification", mlm_framework="pytorch", diff --git a/stac_model/geometry_models.py b/stac_model/geometry_models.py new file mode 100644 index 0000000..125e08b --- /dev/null +++ b/stac_model/geometry_models.py @@ -0,0 +1,39 @@ +from typing import List, Literal, Union + +from pydantic import ( + BaseModel, +) + + +class Geometry(BaseModel): + type: str + coordinates: List + + +class GeoJSONPoint(Geometry): + type: Literal["Point"] + coordinates: List[float] + + +class GeoJSONMultiPoint(Geometry): + type: Literal["MultiPoint"] + coordinates: List[List[float]] + + +class GeoJSONPolygon(Geometry): + type: Literal["Polygon"] + coordinates: List[List[List[float]]] + + +class GeoJSONMultiPolygon(Geometry): + type: Literal["MultiPolygon"] + coordinates: List[List[List[List[float]]]] + + +AnyGeometry = Union[ + Geometry, + GeoJSONPoint, + GeoJSONMultiPoint, + GeoJSONPolygon, + GeoJSONMultiPolygon, +] diff --git a/stac_model/schema.py b/stac_model/schema.py index 84a6f8c..1a60c30 100644 --- a/stac_model/schema.py +++ b/stac_model/schema.py @@ -1,16 +1,48 @@ -from typing import Dict, List, Optional, Union +import json +from datetime import datetime +from typing import ( + Any, + Dict, + Generic, + Iterable, + List, + Literal, + MutableMapping, + Optional, + TypeVar, + Union, + cast, + get_args, +) +import pystac from pydantic import BaseModel, ConfigDict +from pydantic.fields import FieldInfo +from pystac.extensions import item_assets +from pystac.extensions.base import 
( + ExtensionManagementMixin, + PropertiesExtension, + S, # generic pystac.STACObject + SummariesExtension, +) +from .geometry_models import AnyGeometry from .input import Band, InputArray, ModelInput, Statistics from .output import ClassObject, ModelOutput, ResultArray, TaskEnum from .runtime import Asset, Container, Runtime +T = TypeVar("T", pystac.Collection, pystac.Item, pystac.Asset, + item_assets.AssetDefinition) + +SchemaName = Literal["mlm"] +# TODO update +SCHEMA_URI: str = "https://raw.githubusercontent.com/crim-ca/dlm-extension/main/json-schema/schema.json" #noqa: E501 +PREFIX = f"{get_args(SchemaName)[0]}:" def mlm_prefix_replacer(field_name: str) -> str: return field_name.replace("mlm_", "mlm:") -class MLModel(BaseModel): +class MLModelProperties(BaseModel): mlm_name: str mlm_task: TaskEnum mlm_framework: str @@ -28,9 +60,209 @@ class MLModel(BaseModel): model_config = ConfigDict(alias_generator=mlm_prefix_replacer, populate_by_name=True, extra="ignore") +class MLModelHelper: + def __init__(self, attrs: MutableMapping[str, Any]): + self.attrs = attrs + self.mlmodel_attrs = attrs["attributes"] + + @property + def uid(self) -> str: + """Return a unique ID for MLModel data item.""" + keys = [ + "mlm_name", + "mlm_task", + ] + name = "_".join("_".join( + self.mlmodel_attrs[k].split(" ")) for k in keys).lower() + return name + + @property + def properties(self) -> MLModelProperties: + props = MLModelProperties(**self.mlmodel_attrs) + return props + + def stac_item(self, geometry: AnyGeometry, bbox: List[float], + start_datetime: datetime, end_datetime: datetime) -> pystac.Item: + item = pystac.Item( + id=self.uid, + geometry=geometry, + bbox=bbox, + properties={ + "start_datetime": start_datetime, + "end_datetime": end_datetime, + }, + datetime=None, + ) + item_mlmodel = MLModelExtension.ext(item, add_if_missing=True) + item_mlmodel.apply(self.properties) + return item + + +class MLModelExtension( + Generic[T], + PropertiesExtension, + 
ExtensionManagementMixin[Union[pystac.Asset, pystac.Item, pystac.Collection]], +): + @property + def name(self) -> SchemaName: + return get_args(SchemaName)[0] + + def apply( + self, + properties: Union[MLModelProperties, dict[str, Any]], + ) -> None: + """Applies Machine Learning Model Extension properties to the extended + :class:`~pystac.Item` or :class:`~pystac.Asset`. + """ + if isinstance(properties, dict): + properties = MLModelProperties(**properties) + data_json = json.loads(properties.model_dump_json(by_alias=True)) + for prop, val in data_json.items(): + self._set_property(prop, val) + + @classmethod + def get_schema_uri(cls) -> str: + return SCHEMA_URI + + @classmethod + def has_extension(cls, obj: S): + # FIXME: this override should be removed once an official and + # versioned schema is released ignore the original implementation + # logic for a version regex since in our case, the VERSION_REGEX + # is not fulfilled (ie: using 'main' branch, no tag available...) + ext_uri = cls.get_schema_uri() + return obj.stac_extensions is not None and any( + uri == ext_uri for uri in obj.stac_extensions) + + @classmethod + def ext(cls, obj: T, add_if_missing: bool = False) -> "MLModelExtension[T]": + """Extends the given STAC Object with properties from the + :stac-ext:`Machine Learning Model Extension `. + + This extension can be applied to instances of :class:`~pystac.Item` or + :class:`~pystac.Asset`. + + Raises: + + pystac.ExtensionTypeError : If an invalid object type is passed. 
+ """ + if isinstance(obj, pystac.Collection): + cls.ensure_has_extension(obj, add_if_missing) + return cast(MLModelExtension[T], CollectionMLModelExtension(obj)) + elif isinstance(obj, pystac.Item): + cls.ensure_has_extension(obj, add_if_missing) + return cast(MLModelExtension[T], ItemMLModelExtension(obj)) + elif isinstance(obj, pystac.Asset): + cls.ensure_owner_has_extension(obj, add_if_missing) + return cast(MLModelExtension[T], AssetMLModelExtension(obj)) + elif isinstance(obj, item_assets.AssetDefinition): + cls.ensure_owner_has_extension(obj, add_if_missing) + return cast(MLModelExtension[T], ItemAssetsMLModelExtension(obj)) + else: + raise pystac.ExtensionTypeError(cls._ext_error_message(obj)) + + @classmethod + def summaries( + cls, obj: pystac.Collection, add_if_missing: bool = False + ) -> "SummariesMLModelExtension": + """Returns the extended summaries object for the given collection.""" + cls.ensure_has_extension(obj, add_if_missing) + return SummariesMLModelExtension(obj) + +class SummariesMLModelExtension(SummariesExtension): + """A concrete implementation of :class:`~SummariesExtension` that extends + the ``summaries`` field of a :class:`~pystac.Collection` to include properties + defined in the :stac-ext:`Machine Learning Model `. 
+ """ + def _check_mlm_property(self, prop: str) -> FieldInfo: + try: + return MLModelProperties.model_fields[prop] + except KeyError as err: + raise AttributeError( + f"Name '{prop}' is not a valid MLM property.") from err + + def _validate_mlm_property(self, prop: str, summaries: list[Any]) -> None: + model = MLModelProperties.model_construct() + validator = MLModelProperties.__pydantic_validator__ + for value in summaries: + validator.validate_assignment(model, prop, value) + + def get_mlm_property(self, prop: str) -> list[Any]: + self._check_mlm_property(prop) + return self.summaries.get_list(prop) + + def set_mlm_property(self, prop: str, summaries: list[Any]) -> None: + self._check_mlm_property(prop) + self._validate_mlm_property(prop, summaries) + self._set_summary(prop, summaries) + + def __getattr__(self, prop): + return self.get_mlm_property(prop) + + def __setattr__(self, prop, value): + self.set_mlm_property(prop, value) + +class ItemMLModelExtension(MLModelExtension[pystac.Item]): + """A concrete implementation of :class:`MLModelExtension` on an + :class:`~pystac.Item` that extends the properties of the Item to + include properties defined in the :stac-ext:`Machine Learning Model + Extension `. + + This class should generally not be instantiated directly. Instead, call + :meth:`MLModelExtension.ext` on an :class:`~pystac.Item` to extend it. 
+ """ + + def __init__(self, item: pystac.Item): + self.item = item + self.properties = item.properties + + def __repr__(self) -> str: + return f"" + +class ItemAssetsMLModelExtension(MLModelExtension[item_assets.AssetDefinition]): + properties: dict[str, Any] + asset_defn: item_assets.AssetDefinition + + def __init__(self, item_asset: item_assets.AssetDefinition): + self.asset_defn = item_asset + self.properties = item_asset.properties + +class AssetMLModelExtension(MLModelExtension[pystac.Asset]): + """A concrete implementation of :class:`MLModelExtension` on an + :class:`~pystac.Asset` that extends the Asset fields to include + properties defined in the :stac-ext:`Machine Learning Model + Extension `. + + This class should generally not be instantiated directly. Instead, call + :meth:`MLModelExtension.ext` on an :class:`~pystac.Asset` to extend it. + """ + + asset_href: str + """The ``href`` value of the :class:`~pystac.Asset` being extended.""" + + properties: dict[str, Any] + """The :class:`~pystac.Asset` fields, including extension properties.""" + + additional_read_properties: Optional[Iterable[dict[str, Any]]] = None + """If present, this will be a list containing 1 dictionary representing the + properties of the owning :class:`~pystac.Item`.""" + + def __init__(self, asset: pystac.Asset): + self.asset_href = asset.href + self.properties = asset.extra_fields + if asset.owner and isinstance(asset.owner, pystac.Item): + self.additional_read_properties = [asset.owner.properties] + + def __repr__(self) -> str: + return f"" + +class CollectionMLModelExtension(MLModelExtension[pystac.Collection]): + + def __init__(self, collection: pystac.Collection): + self.collection = collection __all__ = [ - "MLModel", + "MLModelExtension", "ModelInput", "InputArray", "Band", From 45901417b736f3c9c2faa57d17d372792377aa79 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Tue, 27 Feb 2024 16:56:16 -0800 Subject: [PATCH 048/112] produce pystac item in example but can't serialize 
datetime --- stac_model/examples.py | 18 +++++++++++++++--- stac_model/runtime.py | 16 ++-------------- stac_model/schema.py | 5 ++--- tests/test_schema.py | 10 +++++----- 4 files changed, 24 insertions(+), 25 deletions(-) diff --git a/stac_model/examples.py b/stac_model/examples.py index 79aaf41..fbc2ab8 100644 --- a/stac_model/examples.py +++ b/stac_model/examples.py @@ -1,8 +1,11 @@ +from datetime import datetime + from stac_model.schema import ( Asset, ClassObject, InputArray, - MLModelExtension, + MLModelHelper, + MLModelProperties, ModelInput, ModelOutput, ResultArray, @@ -112,7 +115,7 @@ def eurosat_resnet(): output_shape=[-1, 10], result_array=[result_array], ) - ml_model_meta = MLModelExtension( + ml_model_meta = MLModelProperties( mlm_name="Resnet-18 Sentinel-2 ALL MOCO", mlm_task="classification", mlm_framework="pytorch", @@ -129,4 +132,13 @@ def eurosat_resnet(): mlm_runtime=[mlm_runtime], mlm_output=[mlm_output], ) - return ml_model_meta + + mlmodel_helper = MLModelHelper(attrs = ml_model_meta.model_dump()) + geometry=None + bbox = [-90, -180, 90, 180] + start_time = datetime.strptime("1900-01-01", '%Y-%m-%d') + end_time = None + item = mlmodel_helper.stac_item(geometry, bbox, start_datetime=start_time, + end_datetime=end_time) + + return item diff --git a/stac_model/runtime.py b/stac_model/runtime.py index 2872abf..ef78812 100644 --- a/stac_model/runtime.py +++ b/stac_model/runtime.py @@ -1,7 +1,7 @@ from enum import Enum from typing import List, Optional -from pydantic import BaseModel, ConfigDict, FilePath, field_validator +from pydantic import AnyUrl, BaseModel, ConfigDict, FilePath from .paths import S3Path @@ -11,7 +11,7 @@ class Asset(BaseModel): Follows the STAC Asset Object spec. 
""" - href: S3Path | FilePath | str + href: S3Path | FilePath | AnyUrl| str title: Optional[str] = None description: Optional[str] = None type: Optional[str] = None @@ -19,18 +19,6 @@ class Asset(BaseModel): model_config = ConfigDict(arbitrary_types_allowed = True) - @field_validator("href") - @classmethod - def check_path_type(cls, v): - if isinstance(v, str): - v = S3Path(url=v) if v.startswith("s3://") else FilePath(f=v) - else: - raise ValueError( - f"Expected str, S3Path, or FilePath input, received {type(v).__name__}" - ) - return v - - class Container(BaseModel): container_file: str image_name: str diff --git a/stac_model/schema.py b/stac_model/schema.py index 1a60c30..115b66a 100644 --- a/stac_model/schema.py +++ b/stac_model/schema.py @@ -63,7 +63,6 @@ class MLModelProperties(BaseModel): class MLModelHelper: def __init__(self, attrs: MutableMapping[str, Any]): self.attrs = attrs - self.mlmodel_attrs = attrs["attributes"] @property def uid(self) -> str: @@ -73,12 +72,12 @@ def uid(self) -> str: "mlm_task", ] name = "_".join("_".join( - self.mlmodel_attrs[k].split(" ")) for k in keys).lower() + self.attrs[k].split(" ")) for k in keys).lower() return name @property def properties(self) -> MLModelProperties: - props = MLModelProperties(**self.mlmodel_attrs) + props = MLModelProperties(**self.attrs) return props def stac_item(self, geometry: AnyGeometry, bbox: List[float], diff --git a/tests/test_schema.py b/tests/test_schema.py index 71359d2..cd3eb8a 100644 --- a/tests/test_schema.py +++ b/tests/test_schema.py @@ -5,10 +5,10 @@ @pytest.fixture def metadata_json(): from stac_model.examples import eurosat_resnet - model_metadata = eurosat_resnet() - return model_metadata.model_dump_json(indent=2) + model_metadata_stac_item = eurosat_resnet() + return model_metadata_stac_item -def test_model_metadata_json_operations(metadata_json): - from stac_model.schema import MLModel - model_metadata = MLModel.model_validate_json(metadata_json) +def 
test_model_metadata_json_operations(model_metadata_stac_item): + from stac_model.schema import MLModelExtension + model_metadata = MLModelExtension.apply(model_metadata_stac_item) assert model_metadata.mlm_name == "Resnet-18 Sentinel-2 ALL MOCO" From 4fc2e8ed721779f752d5913bc1c8397c1246ff3e Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Wed, 28 Feb 2024 13:12:05 -0800 Subject: [PATCH 049/112] remove helper and use pystac.Item in example --- stac_model/__main__.py | 3 ++ stac_model/examples.py | 37 ++++++++++++++-------- stac_model/input.py | 5 +-- stac_model/runtime.py | 5 +-- stac_model/schema.py | 71 +++++++++++++----------------------------- tests/test_schema.py | 13 +++++--- 6 files changed, 64 insertions(+), 70 deletions(-) diff --git a/stac_model/__main__.py b/stac_model/__main__.py index 6eeb4bd..4fbfc32 100644 --- a/stac_model/__main__.py +++ b/stac_model/__main__.py @@ -14,12 +14,14 @@ ) console = Console() + def version_callback(print_version: bool) -> None: """Print the version of the package.""" if print_version: console.print(f"[yellow]stac-model[/] version: [bold blue]{__version__}[/]") raise typer.Exit() + @app.command(name="") def main( print_version: bool = typer.Option( @@ -40,5 +42,6 @@ def main( print("Example model metadata written to ./example.json.") return ml_model_meta + if __name__ == "__main__": app() diff --git a/stac_model/examples.py b/stac_model/examples.py index fbc2ab8..40f3365 100644 --- a/stac_model/examples.py +++ b/stac_model/examples.py @@ -1,10 +1,12 @@ from datetime import datetime +import pystac + from stac_model.schema import ( Asset, ClassObject, InputArray, - MLModelHelper, + MLModelExtension, MLModelProperties, ModelInput, ModelOutput, @@ -15,7 +17,6 @@ def eurosat_resnet(): - input_array = InputArray( shape=[-1, 13, 64, 64], dim_order="bchw", data_type="float32" ) @@ -74,8 +75,7 @@ def eurosat_resnet(): norm_type="z_score", resize_type="none", statistics=stats, - pre_processing_function = 
"https://github.com/microsoft/torchgeo/blob/545abe8326efc2848feae69d0212a15faba3eb00/torchgeo/datamodules/eurosat.py" # noqa: E501 -, + pre_processing_function="https://github.com/microsoft/torchgeo/blob/545abe8326efc2848feae69d0212a15faba3eb00/torchgeo/datamodules/eurosat.py", # noqa: E501 ) mlm_runtime = Runtime( framework="torch", @@ -132,13 +132,24 @@ def eurosat_resnet(): mlm_runtime=[mlm_runtime], mlm_output=[mlm_output], ) - - mlmodel_helper = MLModelHelper(attrs = ml_model_meta.model_dump()) - geometry=None + start_datetime = datetime.strptime("1900-01-01", "%Y-%m-%d") + end_datetime = None + geometry = None bbox = [-90, -180, 90, 180] - start_time = datetime.strptime("1900-01-01", '%Y-%m-%d') - end_time = None - item = mlmodel_helper.stac_item(geometry, bbox, start_datetime=start_time, - end_datetime=end_time) - - return item + name = ( + "_".join(ml_model_meta.mlm_name.split(" ")).lower() + + f"_{ml_model_meta.mlm_task}".lower() + ) + item = pystac.Item( + id=name, + geometry=geometry, + bbox=bbox, + datetime=None, + properties={"start_datetime": start_datetime, "end_datetime": end_datetime}, + ) + item.add_derived_from( + "https://earth-search.aws.element84.com/v1/collections/sentinel-2-l2a" + ) + item_mlmodel = MLModelExtension.ext(item, add_if_missing=True) + item_mlmodel.apply(ml_model_meta.model_dump()) + return item_mlmodel diff --git a/stac_model/input.py b/stac_model/input.py index 4bc0db1..318d766 100644 --- a/stac_model/input.py +++ b/stac_model/input.py @@ -44,8 +44,9 @@ class ModelInput(BaseModel): "none", ] = None resize_type: Literal["crop", "pad", "interpolate", "none"] = None - parameters: Optional[Dict[str, Union[int, str, bool, - List[Union[int, str, bool]]]]] = None + parameters: Optional[ + Dict[str, Union[int, str, bool, List[Union[int, str, bool]]]] + ] = None statistics: Optional[Union[Statistics, List[Statistics]]] = None norm_with_clip_values: Optional[List[Union[float, int]]] = None pre_processing_function: Optional[str | AnyUrl] 
= None diff --git a/stac_model/runtime.py b/stac_model/runtime.py index ef78812..dc11081 100644 --- a/stac_model/runtime.py +++ b/stac_model/runtime.py @@ -11,13 +11,14 @@ class Asset(BaseModel): Follows the STAC Asset Object spec. """ - href: S3Path | FilePath | AnyUrl| str + href: S3Path | FilePath | AnyUrl | str title: Optional[str] = None description: Optional[str] = None type: Optional[str] = None roles: Optional[List[str]] = None - model_config = ConfigDict(arbitrary_types_allowed = True) + model_config = ConfigDict(arbitrary_types_allowed=True) + class Container(BaseModel): container_file: str diff --git a/stac_model/schema.py b/stac_model/schema.py index 115b66a..cbf4ac5 100644 --- a/stac_model/schema.py +++ b/stac_model/schema.py @@ -1,5 +1,4 @@ import json -from datetime import datetime from typing import ( Any, Dict, @@ -7,7 +6,6 @@ Iterable, List, Literal, - MutableMapping, Optional, TypeVar, Union, @@ -26,22 +24,24 @@ SummariesExtension, ) -from .geometry_models import AnyGeometry from .input import Band, InputArray, ModelInput, Statistics from .output import ClassObject, ModelOutput, ResultArray, TaskEnum from .runtime import Asset, Container, Runtime -T = TypeVar("T", pystac.Collection, pystac.Item, pystac.Asset, - item_assets.AssetDefinition) +T = TypeVar( + "T", pystac.Collection, pystac.Item, pystac.Asset, item_assets.AssetDefinition +) SchemaName = Literal["mlm"] # TODO update -SCHEMA_URI: str = "https://raw.githubusercontent.com/crim-ca/dlm-extension/main/json-schema/schema.json" #noqa: E501 +SCHEMA_URI: str = "https://raw.githubusercontent.com/crim-ca/dlm-extension/main/json-schema/schema.json" # noqa: E501 PREFIX = f"{get_args(SchemaName)[0]}:" + def mlm_prefix_replacer(field_name: str) -> str: return field_name.replace("mlm_", "mlm:") + class MLModelProperties(BaseModel): mlm_name: str mlm_task: TaskEnum @@ -55,46 +55,13 @@ class MLModelProperties(BaseModel): mlm_total_parameters: int mlm_pretrained_source: str mlm_summary: str - 
mlm_parameters: Optional[Dict[str, Union[int, str, bool, List[Union[int, str, bool]]]]] = None # noqa: E501 - - model_config = ConfigDict(alias_generator=mlm_prefix_replacer, - populate_by_name=True, extra="ignore") - -class MLModelHelper: - def __init__(self, attrs: MutableMapping[str, Any]): - self.attrs = attrs + mlm_parameters: Optional[ + Dict[str, Union[int, str, bool, List[Union[int, str, bool]]]] + ] = None # noqa: E501 - @property - def uid(self) -> str: - """Return a unique ID for MLModel data item.""" - keys = [ - "mlm_name", - "mlm_task", - ] - name = "_".join("_".join( - self.attrs[k].split(" ")) for k in keys).lower() - return name - - @property - def properties(self) -> MLModelProperties: - props = MLModelProperties(**self.attrs) - return props - - def stac_item(self, geometry: AnyGeometry, bbox: List[float], - start_datetime: datetime, end_datetime: datetime) -> pystac.Item: - item = pystac.Item( - id=self.uid, - geometry=geometry, - bbox=bbox, - properties={ - "start_datetime": start_datetime, - "end_datetime": end_datetime, - }, - datetime=None, - ) - item_mlmodel = MLModelExtension.ext(item, add_if_missing=True) - item_mlmodel.apply(self.properties) - return item + model_config = ConfigDict( + alias_generator=mlm_prefix_replacer, populate_by_name=True, extra="ignore" + ) class MLModelExtension( @@ -131,7 +98,8 @@ def has_extension(cls, obj: S): # is not fulfilled (ie: using 'main' branch, no tag available...) 
ext_uri = cls.get_schema_uri() return obj.stac_extensions is not None and any( - uri == ext_uri for uri in obj.stac_extensions) + uri == ext_uri for uri in obj.stac_extensions + ) @classmethod def ext(cls, obj: T, add_if_missing: bool = False) -> "MLModelExtension[T]": @@ -168,17 +136,18 @@ def summaries( cls.ensure_has_extension(obj, add_if_missing) return SummariesMLModelExtension(obj) + class SummariesMLModelExtension(SummariesExtension): """A concrete implementation of :class:`~SummariesExtension` that extends the ``summaries`` field of a :class:`~pystac.Collection` to include properties defined in the :stac-ext:`Machine Learning Model `. """ + def _check_mlm_property(self, prop: str) -> FieldInfo: try: return MLModelProperties.model_fields[prop] except KeyError as err: - raise AttributeError( - f"Name '{prop}' is not a valid MLM property.") from err + raise AttributeError(f"Name '{prop}' is not a valid MLM property.") from err def _validate_mlm_property(self, prop: str, summaries: list[Any]) -> None: model = MLModelProperties.model_construct() @@ -201,6 +170,7 @@ def __getattr__(self, prop): def __setattr__(self, prop, value): self.set_mlm_property(prop, value) + class ItemMLModelExtension(MLModelExtension[pystac.Item]): """A concrete implementation of :class:`MLModelExtension` on an :class:`~pystac.Item` that extends the properties of the Item to @@ -218,6 +188,7 @@ def __init__(self, item: pystac.Item): def __repr__(self) -> str: return f"" + class ItemAssetsMLModelExtension(MLModelExtension[item_assets.AssetDefinition]): properties: dict[str, Any] asset_defn: item_assets.AssetDefinition @@ -226,6 +197,7 @@ def __init__(self, item_asset: item_assets.AssetDefinition): self.asset_defn = item_asset self.properties = item_asset.properties + class AssetMLModelExtension(MLModelExtension[pystac.Asset]): """A concrete implementation of :class:`MLModelExtension` on an :class:`~pystac.Asset` that extends the Asset fields to include @@ -255,11 +227,12 @@ def 
__init__(self, asset: pystac.Asset): def __repr__(self) -> str: return f"" -class CollectionMLModelExtension(MLModelExtension[pystac.Collection]): +class CollectionMLModelExtension(MLModelExtension[pystac.Collection]): def __init__(self, collection: pystac.Collection): self.collection = collection + __all__ = [ "MLModelExtension", "ModelInput", diff --git a/tests/test_schema.py b/tests/test_schema.py index cd3eb8a..2c12ec9 100644 --- a/tests/test_schema.py +++ b/tests/test_schema.py @@ -1,14 +1,19 @@ - import pytest @pytest.fixture def metadata_json(): from stac_model.examples import eurosat_resnet + model_metadata_stac_item = eurosat_resnet() return model_metadata_stac_item -def test_model_metadata_json_operations(model_metadata_stac_item): + +def test_model_metadata_to_dict(metadata_json): + assert metadata_json.to_dict() + + +def test_model_metadata_json_operations(metadata_json): from stac_model.schema import MLModelExtension - model_metadata = MLModelExtension.apply(model_metadata_stac_item) - assert model_metadata.mlm_name == "Resnet-18 Sentinel-2 ALL MOCO" + + assert MLModelExtension(metadata_json.to_dict()) From 30269d488175803817b72e6b5ce538251831eb7d Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Wed, 28 Feb 2024 13:41:39 -0800 Subject: [PATCH 050/112] update cli example. 
still getting datetime serialization issue --- example.json | 163 ++--------------------------------------- stac_model/__main__.py | 8 +- tests/test_schema.py | 15 ++-- 3 files changed, 14 insertions(+), 172 deletions(-) diff --git a/example.json b/example.json index ef32350..790c8dd 100644 --- a/example.json +++ b/example.json @@ -1,159 +1,6 @@ { - "mlm:name": "Resnet-18 Sentinel-2 ALL MOCO", - "mlm:task": "classification", - "mlm:framework": "pytorch", - "mlm:framework_version": "2.1.2+cu121", - "mlm:file_size": 1, - "mlm:memory_size": 1, - "mlm:input": [ - { - "name": "13 Band Sentinel-2 Batch", - "bands": [ - "B01", - "B02", - "B03", - "B04", - "B05", - "B06", - "B07", - "B08", - "B8A", - "B09", - "B10", - "B11", - "B12" - ], - "input_array": { - "shape": [ - -1, - 13, - 64, - 64 - ], - "dim_order": "bchw", - "data_type": "float32" - }, - "norm_by_channel": true, - "norm_type": "z_score", - "statistics": { - "mean": [ - 1354.40546513, - 1118.24399958, - 1042.92983953, - 947.62620298, - 1199.47283961, - 1999.79090914, - 2369.22292565, - 2296.82608323, - 732.08340178, - 12.11327804, - 1819.01027855, - 1118.92391149, - 2594.14080798 - ], - "stddev": [ - 245.71762908, - 333.00778264, - 395.09249139, - 593.75055589, - 566.4170017, - 861.18399006, - 1086.63139075, - 1117.98170791, - 404.91978886, - 4.77584468, - 1002.58768311, - 761.30323499, - 1231.58581042 - ] - }, - "pre_processing_function": "https://github.com/microsoft/torchgeo/blob/545abe8326efc2848feae69d0212a15faba3eb00/torchgeo/datamodules/eurosat.py" - } - ], - "mlm:output": [ - { - "task": "classification", - "result_array": [ - { - "shape": [ - -1, - 10 - ], - "dim_names": [ - "batch", - "class" - ], - "data_type": "float32" - } - ], - "classification_classes": [ - { - "value": 0, - "name": "Annual Crop", - "nodata": false - }, - { - "value": 1, - "name": "Forest", - "nodata": false - }, - { - "value": 2, - "name": "Herbaceous Vegetation", - "nodata": false - }, - { - "value": 3, - "name": "Highway", - 
"nodata": false - }, - { - "value": 4, - "name": "Industrial Buildings", - "nodata": false - }, - { - "value": 5, - "name": "Pasture", - "nodata": false - }, - { - "value": 6, - "name": "Permanent Crop", - "nodata": false - }, - { - "value": 7, - "name": "Residential Buildings", - "nodata": false - }, - { - "value": 8, - "name": "River", - "nodata": false - }, - { - "value": 9, - "name": "SeaLake", - "nodata": false - } - ] - } - ], - "mlm:runtime": [ - { - "asset": { - "href": "." - }, - "source_code": { - "href": "." - }, - "accelerator": "cuda", - "accelerator_constrained": false, - "hardware_summary": "Unknown" - } - ], - "mlm:total_parameters": 11700000, - "mlm:pretrained_source": "EuroSat Sentinel-2", - "mlm:summary": "Sourced from torchgeo python library,identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO" -} + "type": "Feature", + "stac_version": "1.0.0", + "id": "resnet-18_sentinel-2_all_moco_classification", + "properties": { + "start_datetime": \ No newline at end of file diff --git a/stac_model/__main__.py b/stac_model/__main__.py index 4fbfc32..7c2a1c3 100644 --- a/stac_model/__main__.py +++ b/stac_model/__main__.py @@ -1,6 +1,6 @@ import typer from rich.console import Console - +import json from stac_model import __version__ from stac_model.examples import eurosat_resnet @@ -35,10 +35,8 @@ def main( ) -> None: """Generate example spec.""" ml_model_meta = eurosat_resnet() - json_str = ml_model_meta.model_dump_json(indent=2, exclude_none=True, by_alias=True) - with open("example.json", "w") as file: - file.write(json_str) - print(ml_model_meta.model_dump_json(indent=2, exclude_none=True, by_alias=True)) + with open("example.json", "w") as json_file: + json.dump(ml_model_meta.item.to_dict(), json_file, indent=4) print("Example model metadata written to ./example.json.") return ml_model_meta diff --git a/tests/test_schema.py b/tests/test_schema.py index 2c12ec9..20154f8 100644 --- a/tests/test_schema.py +++ b/tests/test_schema.py @@ -2,18 +2,15 @@ 
@pytest.fixture -def metadata_json(): +def mlmodel_metadata_item(): from stac_model.examples import eurosat_resnet model_metadata_stac_item = eurosat_resnet() return model_metadata_stac_item +def test_model_metadata_to_dict(mlmodel_metadata_item): + assert mlmodel_metadata_item.item.to_dict() -def test_model_metadata_to_dict(metadata_json): - assert metadata_json.to_dict() - - -def test_model_metadata_json_operations(metadata_json): - from stac_model.schema import MLModelExtension - - assert MLModelExtension(metadata_json.to_dict()) +def test_validate_model_metadata(mlmodel_metadata_item): + import pystac + assert pystac.read_dict(mlmodel_metadata_item.item.to_dict()) From 9ddff24e59c8bdbc37e1e225f0fccb384c861d8d Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Wed, 28 Feb 2024 14:43:32 -0800 Subject: [PATCH 051/112] export an example with stac common metadata, derived from link to dataset --- example.json | 230 ++++++++++++++++++++++++++++++++++++++++- stac_model/examples.py | 9 +- 2 files changed, 234 insertions(+), 5 deletions(-) diff --git a/example.json b/example.json index 790c8dd..27b52a7 100644 --- a/example.json +++ b/example.json @@ -3,4 +3,232 @@ "stac_version": "1.0.0", "id": "resnet-18_sentinel-2_all_moco_classification", "properties": { - "start_datetime": \ No newline at end of file + "start_datetime": "1900-01-01", + "end_datetime": null, + "mlm:name": "Resnet-18 Sentinel-2 ALL MOCO", + "mlm:task": "classification", + "mlm:framework": "pytorch", + "mlm:framework_version": "2.1.2+cu121", + "mlm:file_size": 1, + "mlm:memory_size": 1, + "mlm:input": [ + { + "name": "13 Band Sentinel-2 Batch", + "bands": [ + "B01", + "B02", + "B03", + "B04", + "B05", + "B06", + "B07", + "B08", + "B8A", + "B09", + "B10", + "B11", + "B12" + ], + "input_array": { + "shape": [ + -1, + 13, + 64, + 64 + ], + "dim_order": "bchw", + "data_type": "float32" + }, + "norm_by_channel": true, + "norm_type": "z_score", + "resize_type": "none", + "parameters": null, + "statistics": { + 
"minimum": null, + "maximum": null, + "mean": [ + 1354.40546513, + 1118.24399958, + 1042.92983953, + 947.62620298, + 1199.47283961, + 1999.79090914, + 2369.22292565, + 2296.82608323, + 732.08340178, + 12.11327804, + 1819.01027855, + 1118.92391149, + 2594.14080798 + ], + "stddev": [ + 245.71762908, + 333.00778264, + 395.09249139, + 593.75055589, + 566.4170017, + 861.18399006, + 1086.63139075, + 1117.98170791, + 404.91978886, + 4.77584468, + 1002.58768311, + 761.30323499, + 1231.58581042 + ], + "count": null, + "valid_percent": null + }, + "norm_with_clip_values": null, + "pre_processing_function": "https://github.com/microsoft/torchgeo/blob/545abe8326efc2848feae69d0212a15faba3eb00/torchgeo/datamodules/eurosat.py" + } + ], + "mlm:output": [ + { + "task": "classification", + "result_array": [ + { + "shape": [ + -1, + 10 + ], + "dim_names": [ + "batch", + "class" + ], + "data_type": "float32" + } + ], + "classification_classes": [ + { + "value": 0, + "name": "Annual Crop", + "description": null, + "title": null, + "color_hint": null, + "nodata": false + }, + { + "value": 1, + "name": "Forest", + "description": null, + "title": null, + "color_hint": null, + "nodata": false + }, + { + "value": 2, + "name": "Herbaceous Vegetation", + "description": null, + "title": null, + "color_hint": null, + "nodata": false + }, + { + "value": 3, + "name": "Highway", + "description": null, + "title": null, + "color_hint": null, + "nodata": false + }, + { + "value": 4, + "name": "Industrial Buildings", + "description": null, + "title": null, + "color_hint": null, + "nodata": false + }, + { + "value": 5, + "name": "Pasture", + "description": null, + "title": null, + "color_hint": null, + "nodata": false + }, + { + "value": 6, + "name": "Permanent Crop", + "description": null, + "title": null, + "color_hint": null, + "nodata": false + }, + { + "value": 7, + "name": "Residential Buildings", + "description": null, + "title": null, + "color_hint": null, + "nodata": false + }, + { + "value": 
8, + "name": "River", + "description": null, + "title": null, + "color_hint": null, + "nodata": false + }, + { + "value": 9, + "name": "SeaLake", + "description": null, + "title": null, + "color_hint": null, + "nodata": false + } + ], + "post_processing_function": null + } + ], + "mlm:runtime": [ + { + "asset": { + "href": "https://huggingface.co/torchgeo/resnet18_sentinel2_all_moco/resolve/main/resnet18_sentinel2_all_moco-59bfdff9.pth", + "title": null, + "description": null, + "type": null, + "roles": null + }, + "source_code": { + "href": "https://github.com/microsoft/torchgeo/blob/61efd2e2c4df7ebe3bd03002ebbaeaa3cfe9885a/torchgeo/models/resnet.py#L207", + "title": null, + "description": null, + "type": null, + "roles": null + }, + "accelerator": "cuda", + "accelerator_constrained": false, + "hardware_summary": "Unknown", + "container": null, + "commit_hash": null, + "batch_size_suggestion": null + } + ], + "mlm:total_parameters": 11700000, + "mlm:pretrained_source": "EuroSat Sentinel-2", + "mlm:summary": "Sourced from torchgeo python library,identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO", + "datetime": null + }, + "geometry": null, + "links": [ + { + "rel": "derived_from", + "href": "https://earth-search.aws.element84.com/v1/collections/sentinel-2-l2a", + "type": "application/json" + } + ], + "assets": {}, + "bbox": [ + -90, + -180, + 90, + 180 + ], + "stac_extensions": [ + "https://raw.githubusercontent.com/crim-ca/dlm-extension/main/json-schema/schema.json" + ] +} \ No newline at end of file diff --git a/stac_model/examples.py b/stac_model/examples.py index 40f3365..ee5853b 100644 --- a/stac_model/examples.py +++ b/stac_model/examples.py @@ -1,5 +1,3 @@ -from datetime import datetime - import pystac from stac_model.schema import ( @@ -120,7 +118,7 @@ def eurosat_resnet(): mlm_task="classification", mlm_framework="pytorch", mlm_framework_version="2.1.2+cu121", - mlm_file_size=1, + mlm_file_size=43000000, mlm_memory_size=1, mlm_summary=( "Sourced from 
torchgeo python library," @@ -132,7 +130,10 @@ def eurosat_resnet(): mlm_runtime=[mlm_runtime], mlm_output=[mlm_output], ) - start_datetime = datetime.strptime("1900-01-01", "%Y-%m-%d") + # TODO, this can't be serialized but pystac.item calls for a datetime + # in docs. start_datetime=datetime.strptime("1900-01-01", "%Y-%m-%d") + # Is this a problem that we don't do date validation if we supply as str? + start_datetime = "1900-01-01" end_datetime = None geometry = None bbox = [-90, -180, 90, 180] From 0bed29bd817e12df90f01395a167c4b3cd977d8b Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Thu, 7 Mar 2024 12:30:43 -0800 Subject: [PATCH 052/112] remove mlm_prefix in pydantic models --- STAC_MODEL_README.md => README_STAC_MODEL.md | 0 example.json => examples/example.json | 0 stac_model/examples.py | 34 ++++++++++---------- stac_model/schema.py | 32 +++++++++--------- 4 files changed, 33 insertions(+), 33 deletions(-) rename STAC_MODEL_README.md => README_STAC_MODEL.md (100%) rename example.json => examples/example.json (100%) diff --git a/STAC_MODEL_README.md b/README_STAC_MODEL.md similarity index 100% rename from STAC_MODEL_README.md rename to README_STAC_MODEL.md diff --git a/example.json b/examples/example.json similarity index 100% rename from example.json rename to examples/example.json diff --git a/stac_model/examples.py b/stac_model/examples.py index ee5853b..88e24f4 100644 --- a/stac_model/examples.py +++ b/stac_model/examples.py @@ -65,7 +65,7 @@ def eurosat_resnet(): 1231.58581042, ], ) - mlm_input = ModelInput( + input = ModelInput( name="13 Band Sentinel-2 Batch", bands=band_names, input_array=input_array, @@ -75,7 +75,7 @@ def eurosat_resnet(): statistics=stats, pre_processing_function="https://github.com/microsoft/torchgeo/blob/545abe8326efc2848feae69d0212a15faba3eb00/torchgeo/datamodules/eurosat.py", # noqa: E501 ) - mlm_runtime = Runtime( + runtime = Runtime( framework="torch", version="2.1.2+cu121", asset=Asset( @@ -107,28 +107,28 @@ def 
eurosat_resnet(): ClassObject(value=class_map[class_name], name=class_name) for class_name in class_map ] - mlm_output = ModelOutput( + output = ModelOutput( task="classification", classification_classes=class_objects, output_shape=[-1, 10], result_array=[result_array], ) ml_model_meta = MLModelProperties( - mlm_name="Resnet-18 Sentinel-2 ALL MOCO", - mlm_task="classification", - mlm_framework="pytorch", - mlm_framework_version="2.1.2+cu121", - mlm_file_size=43000000, - mlm_memory_size=1, - mlm_summary=( + name="Resnet-18 Sentinel-2 ALL MOCO", + task="classification", + framework="pytorch", + framework_version="2.1.2+cu121", + file_size=43000000, + memory_size=1, + summary=( "Sourced from torchgeo python library," "identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO" ), - mlm_pretrained_source="EuroSat Sentinel-2", - mlm_total_parameters=11_700_000, - mlm_input=[mlm_input], - mlm_runtime=[mlm_runtime], - mlm_output=[mlm_output], + pretrained_source="EuroSat Sentinel-2", + total_parameters=11_700_000, + input=[input], + runtime=[runtime], + output=[output], ) # TODO, this can't be serialized but pystac.item calls for a datetime # in docs. 
start_datetime=datetime.strptime("1900-01-01", "%Y-%m-%d") @@ -138,8 +138,8 @@ def eurosat_resnet(): geometry = None bbox = [-90, -180, 90, 180] name = ( - "_".join(ml_model_meta.mlm_name.split(" ")).lower() - + f"_{ml_model_meta.mlm_task}".lower() + "_".join(ml_model_meta.name.split(" ")).lower() + + f"_{ml_model_meta.task}".lower() ) item = pystac.Item( id=name, diff --git a/stac_model/schema.py b/stac_model/schema.py index cbf4ac5..03b7c50 100644 --- a/stac_model/schema.py +++ b/stac_model/schema.py @@ -38,29 +38,29 @@ PREFIX = f"{get_args(SchemaName)[0]}:" -def mlm_prefix_replacer(field_name: str) -> str: - return field_name.replace("mlm_", "mlm:") +def mlm_prefix_adder(field_name: str) -> str: + return "mlm:" + field_name class MLModelProperties(BaseModel): - mlm_name: str - mlm_task: TaskEnum - mlm_framework: str - mlm_framework_version: str - mlm_file_size: int - mlm_memory_size: int - mlm_input: List[ModelInput] - mlm_output: List[ModelOutput] - mlm_runtime: List[Runtime] - mlm_total_parameters: int - mlm_pretrained_source: str - mlm_summary: str - mlm_parameters: Optional[ + name: str + task: TaskEnum + framework: str + framework_version: str + file_size: int + memory_size: int + input: List[ModelInput] + output: List[ModelOutput] + runtime: List[Runtime] + total_parameters: int + pretrained_source: str + summary: str + parameters: Optional[ Dict[str, Union[int, str, bool, List[Union[int, str, bool]]]] ] = None # noqa: E501 model_config = ConfigDict( - alias_generator=mlm_prefix_replacer, populate_by_name=True, extra="ignore" + alias_generator=mlm_prefix_adder, populate_by_name=True, extra="ignore" ) From bf3b07f767be07855414d218568f37a707cbddf9 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Thu, 7 Mar 2024 15:15:04 -0800 Subject: [PATCH 053/112] address comments --- README.md | 24 +++--- README_STAC_MODEL.md | 165 +---------------------------------------- stac_model/examples.py | 14 +++- 3 files changed, 24 insertions(+), 179 deletions(-) diff --git 
a/README.md b/README.md index 38fa54f..5fac643 100644 --- a/README.md +++ b/README.md @@ -111,16 +111,18 @@ Note: It is common in the machine learning, computer vision, and remote sensing ### Runtime Object -| Field Name | Type | Description | -|-------------------------|---------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| model_asset | [Asset Object](stac-asset) | **REQUIRED.** Asset object containing URI to the model file. | -| source_code | [Asset Object](stac-asset) | **REQUIRED.** Source code description. Can describe a github repo, zip archive, etc. This description should reference the inference function, for example my_package.my_module.predict | -| accelerator | [Accelerator Enum](#accelerator-enum) | **REQUIRED.** The intended computational hardware that runs inference. | -| accelerator_constrained | boolean | **REQUIRED.** True if the intended `accelerator` is the only `accelerator` that can run inference. False if other accelerators, such as amd64 (CPU), can run inference. | -| hardware_summary | string | **REQUIRED.** A high level description of the number of accelerators, specific generation of the `accelerator`, or other relevant inference details. | -| container | [Container](#container) | **RECOMMENDED.** Information to run the model in a container instance. | -| model_commit_hash | string | Hash value pointing to a specific version of the code. | -| batch_size_suggestion | number | A suggested batch size for the accelerator and summarized hardware. 
| +| Field Name | Type | Description | +| ----------------------- | ------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| model_asset | [Asset Object](stac-asset) | **REQUIRED.** Asset object containing URI to the model file. Recommended asset `roles` include `weights` for model weights that need to be loaded by a model definition and `compiled` for models that can be loaded directly without an intermediate model definition. | +| source_code | [Asset Object](stac-asset) | **REQUIRED.** Source code description. Can describe a github repo, zip archive, etc. This description should reference the inference function, for example my_package.my_module.predict | +| accelerator | [Accelerator Enum](#accelerator-enum) | **REQUIRED.** The intended computational hardware that runs inference. | +| accelerator_constrained | boolean | **REQUIRED.** True if the intended `accelerator` is the only `accelerator` that can run inference. False if other accelerators, such as amd64 (CPU), can run inference. | +| hardware_summary | string | **REQUIRED.** A high level description of the number of accelerators, specific generation of the `accelerator`, or other relevant inference details. | +| container | [Container](#container) | **RECOMMENDED.** Information to run the model in a container instance. | +| model_commit_hash | string | Hash value pointing to a specific version of the code. | +| batch_size_suggestion | number | A suggested batch size for the accelerator and summarized hardware. | + +For the `model_a` #### Accelerator Enum @@ -175,7 +177,7 @@ You can also use other base images. 
Pytorch and Tensorflow offer docker images f | Field Name | Type | Description | |--------------------------|-----------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | task | [Task Enum](#task-enum) | **REQUIRED.** Specifies the Machine Learning task for which the output can be used for. | -| result | [[Result Array Object](#result-array-object)] | The list of output array/tensor from the model. For example ($N \times H \times W$). Use -1 to indicate variable dimensions, like the batch dimension. | +| result_array | [[Result Array Object](#result-array-object)] | The list of output arrays/tensors from the model. | | classification:classes | [[Class Object](#class-object)] | A list of class objects adhering to the [Classification extension](https://github.com/stac-extensions/classification). | | post_processing_function | string | A url to the postprocessing function where normalization, rescaling, and other operations take place.. Or, instead, the function code path, for example: `my_package.my_module.my_processing_function` | diff --git a/README_STAC_MODEL.md b/README_STAC_MODEL.md index 4e59a86..58556c1 100644 --- a/README_STAC_MODEL.md +++ b/README_STAC_MODEL.md @@ -42,170 +42,7 @@ stac-model --help stac-model ``` -This will make an example example.json metadata file for an example model. 
- -Currently this looks like - -```json - "mlm_name": "Resnet-18 Sentinel-2 ALL MOCO", - "mlm_task": "classification", - "mlm_framework": "pytorch", - "mlm_framework_version": "2.1.2+cu121", - "mlm_file_size": 1, - "mlm_memory_size": 1, - "mlm_input": [ - { - "name": "13 Band Sentinel-2 Batch", - "bands": [ - "B01", - "B02", - "B03", - "B04", - "B05", - "B06", - "B07", - "B08", - "B8A", - "B09", - "B10", - "B11", - "B12" - ], - "input_array": { - "shape": [ - -1, - 13, - 64, - 64 - ], - "dim_order": "bchw", - "data_type": "float32" - }, - "norm_by_channel": true, - "norm_type": "z_score", - "statistics": { - "mean": [ - 1354.40546513, - 1118.24399958, - 1042.92983953, - 947.62620298, - 1199.47283961, - 1999.79090914, - 2369.22292565, - 2296.82608323, - 732.08340178, - 12.11327804, - 1819.01027855, - 1118.92391149, - 2594.14080798 - ], - "stddev": [ - 245.71762908, - 333.00778264, - 395.09249139, - 593.75055589, - 566.4170017, - 861.18399006, - 1086.63139075, - 1117.98170791, - 404.91978886, - 4.77584468, - 1002.58768311, - 761.30323499, - 1231.58581042 - ] - }, - "pre_processing_function": "https://github.com/microsoft/torchgeo/blob/545abe8326efc2848feae69d0212a15faba3eb00/torchgeo/datamodules/eurosat.py" - } - ], - "mlm_output": [ - { - "task": "classification", - "result_array": [ - { - "shape": [ - -1, - 10 - ], - "dim_names": [ - "batch", - "class" - ], - "data_type": "float32" - } - ], - "classification_classes": [ - { - "value": 0, - "name": "Annual Crop", - "nodata": false - }, - { - "value": 1, - "name": "Forest", - "nodata": false - }, - { - "value": 2, - "name": "Herbaceous Vegetation", - "nodata": false - }, - { - "value": 3, - "name": "Highway", - "nodata": false - }, - { - "value": 4, - "name": "Industrial Buildings", - "nodata": false - }, - { - "value": 5, - "name": "Pasture", - "nodata": false - }, - { - "value": 6, - "name": "Permanent Crop", - "nodata": false - }, - { - "value": 7, - "name": "Residential Buildings", - "nodata": false - }, - { - 
"value": 8, - "name": "River", - "nodata": false - }, - { - "value": 9, - "name": "SeaLake", - "nodata": false - } - ] - } - ], - "mlm_runtime": [ - { - "asset": { - "href": "." - }, - "source_code": { - "href": "." - }, - "accelerator": "cuda", - "accelerator_constrained": false, - "hardware_summary": "Unknown" - } - ], - "mlm_total_parameters": 11700000, - "mlm_pretrained_source": "EuroSat Sentinel-2", - "mlm_summary": "Sourced from torchgeo python library, identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO" -} -``` +This will make [this example item](./examples/example.json) for an example model. ## :chart_with_upwards_trend: Releases diff --git a/stac_model/examples.py b/stac_model/examples.py index 88e24f4..6831222 100644 --- a/stac_model/examples.py +++ b/stac_model/examples.py @@ -1,5 +1,6 @@ import pystac - +import json +import shapely from stac_model.schema import ( Asset, ClassObject, @@ -73,7 +74,7 @@ def eurosat_resnet(): norm_type="z_score", resize_type="none", statistics=stats, - pre_processing_function="https://github.com/microsoft/torchgeo/blob/545abe8326efc2848feae69d0212a15faba3eb00/torchgeo/datamodules/eurosat.py", # noqa: E501 + pre_processing_function="torchgeo.datamodules.eurosat.EuroSATDataModule.collate_fn", # noqa: E501 ) runtime = Runtime( framework="torch", @@ -135,8 +136,13 @@ def eurosat_resnet(): # Is this a problem that we don't do date validation if we supply as str? 
start_datetime = "1900-01-01" end_datetime = None - geometry = None - bbox = [-90, -180, 90, 180] + bbox = [ + -7.882190080512502, + 37.13739173208318, + 27.911651652899923, + 58.21798141355221 + ] + geometry = json.dumps(shapely.geometry.Polygon.from_bounds(*bbox).__geo_interface__, indent=2) name = ( "_".join(ml_model_meta.name.split(" ")).lower() + f"_{ml_model_meta.task}".lower() From c52daa77d3708b028296e455280ac7a6f8136407 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Thu, 7 Mar 2024 17:02:02 -0800 Subject: [PATCH 054/112] update poetry, remove s3Path since it fails with recent pydantic and is unused --- .gitignore | 1 + poetry.lock | 280 +++++++++++++++++++++++------------------- pyproject.toml | 4 +- stac_model/paths.py | 49 -------- stac_model/runtime.py | 4 +- 5 files changed, 159 insertions(+), 179 deletions(-) delete mode 100644 stac_model/paths.py diff --git a/.gitignore b/.gitignore index 98270d4..a556189 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ +Untitled.ipynb /package-lock.json /node_modules .vscode diff --git a/poetry.lock b/poetry.lock index 55aef26..b06d9d6 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. 
[[package]] name = "annotated-types" @@ -598,18 +598,18 @@ files = [ [[package]] name = "pydantic" -version = "2.3.0" +version = "2.6.3" description = "Data validation using Python type hints" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "pydantic-2.3.0-py3-none-any.whl", hash = "sha256:45b5e446c6dfaad9444819a293b921a40e1db1aa61ea08aede0522529ce90e81"}, - {file = "pydantic-2.3.0.tar.gz", hash = "sha256:1607cc106602284cd4a00882986570472f193fde9cb1259bceeaedb26aa79a6d"}, + {file = "pydantic-2.6.3-py3-none-any.whl", hash = "sha256:72c6034df47f46ccdf81869fddb81aade68056003900a8724a4f160700016a2a"}, + {file = "pydantic-2.6.3.tar.gz", hash = "sha256:e07805c4c7f5c6826e33a1d4c9d47950d7eaf34868e2690f8594d2e30241f11f"}, ] [package.dependencies] annotated-types = ">=0.4.0" -pydantic-core = "2.6.3" +pydantic-core = "2.16.3" typing-extensions = ">=4.6.1" [package.extras] @@ -617,117 +617,90 @@ email = ["email-validator (>=2.0.0)"] [[package]] name = "pydantic-core" -version = "2.6.3" +version = "2.16.3" description = "" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.6.3-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:1a0ddaa723c48af27d19f27f1c73bdc615c73686d763388c8683fe34ae777bad"}, - {file = "pydantic_core-2.6.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5cfde4fab34dd1e3a3f7f3db38182ab6c95e4ea91cf322242ee0be5c2f7e3d2f"}, - {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5493a7027bfc6b108e17c3383959485087d5942e87eb62bbac69829eae9bc1f7"}, - {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:84e87c16f582f5c753b7f39a71bd6647255512191be2d2dbf49458c4ef024588"}, - {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:522a9c4a4d1924facce7270c84b5134c5cabcb01513213662a2e89cf28c1d309"}, - {file = 
"pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aaafc776e5edc72b3cad1ccedb5fd869cc5c9a591f1213aa9eba31a781be9ac1"}, - {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a750a83b2728299ca12e003d73d1264ad0440f60f4fc9cee54acc489249b728"}, - {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9e8b374ef41ad5c461efb7a140ce4730661aadf85958b5c6a3e9cf4e040ff4bb"}, - {file = "pydantic_core-2.6.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b594b64e8568cf09ee5c9501ede37066b9fc41d83d58f55b9952e32141256acd"}, - {file = "pydantic_core-2.6.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2a20c533cb80466c1d42a43a4521669ccad7cf2967830ac62c2c2f9cece63e7e"}, - {file = "pydantic_core-2.6.3-cp310-none-win32.whl", hash = "sha256:04fe5c0a43dec39aedba0ec9579001061d4653a9b53a1366b113aca4a3c05ca7"}, - {file = "pydantic_core-2.6.3-cp310-none-win_amd64.whl", hash = "sha256:6bf7d610ac8f0065a286002a23bcce241ea8248c71988bda538edcc90e0c39ad"}, - {file = "pydantic_core-2.6.3-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:6bcc1ad776fffe25ea5c187a028991c031a00ff92d012ca1cc4714087e575973"}, - {file = "pydantic_core-2.6.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:df14f6332834444b4a37685810216cc8fe1fe91f447332cd56294c984ecbff1c"}, - {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0b7486d85293f7f0bbc39b34e1d8aa26210b450bbd3d245ec3d732864009819"}, - {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a892b5b1871b301ce20d40b037ffbe33d1407a39639c2b05356acfef5536d26a"}, - {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:883daa467865e5766931e07eb20f3e8152324f0adf52658f4d302242c12e2c32"}, - {file = 
"pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d4eb77df2964b64ba190eee00b2312a1fd7a862af8918ec70fc2d6308f76ac64"}, - {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ce8c84051fa292a5dc54018a40e2a1926fd17980a9422c973e3ebea017aa8da"}, - {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:22134a4453bd59b7d1e895c455fe277af9d9d9fbbcb9dc3f4a97b8693e7e2c9b"}, - {file = "pydantic_core-2.6.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:02e1c385095efbd997311d85c6021d32369675c09bcbfff3b69d84e59dc103f6"}, - {file = "pydantic_core-2.6.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d79f1f2f7ebdb9b741296b69049ff44aedd95976bfee38eb4848820628a99b50"}, - {file = "pydantic_core-2.6.3-cp311-none-win32.whl", hash = "sha256:430ddd965ffd068dd70ef4e4d74f2c489c3a313adc28e829dd7262cc0d2dd1e8"}, - {file = "pydantic_core-2.6.3-cp311-none-win_amd64.whl", hash = "sha256:84f8bb34fe76c68c9d96b77c60cef093f5e660ef8e43a6cbfcd991017d375950"}, - {file = "pydantic_core-2.6.3-cp311-none-win_arm64.whl", hash = "sha256:5a2a3c9ef904dcdadb550eedf3291ec3f229431b0084666e2c2aa8ff99a103a2"}, - {file = "pydantic_core-2.6.3-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:8421cf496e746cf8d6b677502ed9a0d1e4e956586cd8b221e1312e0841c002d5"}, - {file = "pydantic_core-2.6.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bb128c30cf1df0ab78166ded1ecf876620fb9aac84d2413e8ea1594b588c735d"}, - {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37a822f630712817b6ecc09ccc378192ef5ff12e2c9bae97eb5968a6cdf3b862"}, - {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:240a015102a0c0cc8114f1cba6444499a8a4d0333e178bc504a5c2196defd456"}, - {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:3f90e5e3afb11268628c89f378f7a1ea3f2fe502a28af4192e30a6cdea1e7d5e"}, - {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:340e96c08de1069f3d022a85c2a8c63529fd88709468373b418f4cf2c949fb0e"}, - {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1480fa4682e8202b560dcdc9eeec1005f62a15742b813c88cdc01d44e85308e5"}, - {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f14546403c2a1d11a130b537dda28f07eb6c1805a43dae4617448074fd49c282"}, - {file = "pydantic_core-2.6.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a87c54e72aa2ef30189dc74427421e074ab4561cf2bf314589f6af5b37f45e6d"}, - {file = "pydantic_core-2.6.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f93255b3e4d64785554e544c1c76cd32f4a354fa79e2eeca5d16ac2e7fdd57aa"}, - {file = "pydantic_core-2.6.3-cp312-none-win32.whl", hash = "sha256:f70dc00a91311a1aea124e5f64569ea44c011b58433981313202c46bccbec0e1"}, - {file = "pydantic_core-2.6.3-cp312-none-win_amd64.whl", hash = "sha256:23470a23614c701b37252618e7851e595060a96a23016f9a084f3f92f5ed5881"}, - {file = "pydantic_core-2.6.3-cp312-none-win_arm64.whl", hash = "sha256:1ac1750df1b4339b543531ce793b8fd5c16660a95d13aecaab26b44ce11775e9"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:a53e3195f134bde03620d87a7e2b2f2046e0e5a8195e66d0f244d6d5b2f6d31b"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:f2969e8f72c6236c51f91fbb79c33821d12a811e2a94b7aa59c65f8dbdfad34a"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:672174480a85386dd2e681cadd7d951471ad0bb028ed744c895f11f9d51b9ebe"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:002d0ea50e17ed982c2d65b480bd975fc41086a5a2f9c924ef8fc54419d1dea3"}, - {file = 
"pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3ccc13afee44b9006a73d2046068d4df96dc5b333bf3509d9a06d1b42db6d8bf"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:439a0de139556745ae53f9cc9668c6c2053444af940d3ef3ecad95b079bc9987"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d63b7545d489422d417a0cae6f9898618669608750fc5e62156957e609e728a5"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b44c42edc07a50a081672e25dfe6022554b47f91e793066a7b601ca290f71e42"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1c721bfc575d57305dd922e6a40a8fe3f762905851d694245807a351ad255c58"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:5e4a2cf8c4543f37f5dc881de6c190de08096c53986381daebb56a355be5dfe6"}, - {file = "pydantic_core-2.6.3-cp37-none-win32.whl", hash = "sha256:d9b4916b21931b08096efed090327f8fe78e09ae8f5ad44e07f5c72a7eedb51b"}, - {file = "pydantic_core-2.6.3-cp37-none-win_amd64.whl", hash = "sha256:a8acc9dedd304da161eb071cc7ff1326aa5b66aadec9622b2574ad3ffe225525"}, - {file = "pydantic_core-2.6.3-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:5e9c068f36b9f396399d43bfb6defd4cc99c36215f6ff33ac8b9c14ba15bdf6b"}, - {file = "pydantic_core-2.6.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e61eae9b31799c32c5f9b7be906be3380e699e74b2db26c227c50a5fc7988698"}, - {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d85463560c67fc65cd86153a4975d0b720b6d7725cf7ee0b2d291288433fc21b"}, - {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9616567800bdc83ce136e5847d41008a1d602213d024207b0ff6cab6753fe645"}, - {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", 
hash = "sha256:9e9b65a55bbabda7fccd3500192a79f6e474d8d36e78d1685496aad5f9dbd92c"}, - {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f468d520f47807d1eb5d27648393519655eadc578d5dd862d06873cce04c4d1b"}, - {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9680dd23055dd874173a3a63a44e7f5a13885a4cfd7e84814be71be24fba83db"}, - {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a718d56c4d55efcfc63f680f207c9f19c8376e5a8a67773535e6f7e80e93170"}, - {file = "pydantic_core-2.6.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8ecbac050856eb6c3046dea655b39216597e373aa8e50e134c0e202f9c47efec"}, - {file = "pydantic_core-2.6.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:788be9844a6e5c4612b74512a76b2153f1877cd845410d756841f6c3420230eb"}, - {file = "pydantic_core-2.6.3-cp38-none-win32.whl", hash = "sha256:07a1aec07333bf5adebd8264047d3dc518563d92aca6f2f5b36f505132399efc"}, - {file = "pydantic_core-2.6.3-cp38-none-win_amd64.whl", hash = "sha256:621afe25cc2b3c4ba05fff53525156d5100eb35c6e5a7cf31d66cc9e1963e378"}, - {file = "pydantic_core-2.6.3-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:813aab5bfb19c98ae370952b6f7190f1e28e565909bfc219a0909db168783465"}, - {file = "pydantic_core-2.6.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:50555ba3cb58f9861b7a48c493636b996a617db1a72c18da4d7f16d7b1b9952b"}, - {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19e20f8baedd7d987bd3f8005c146e6bcbda7cdeefc36fad50c66adb2dd2da48"}, - {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b0a5d7edb76c1c57b95df719af703e796fc8e796447a1da939f97bfa8a918d60"}, - {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f06e21ad0b504658a3a9edd3d8530e8cea5723f6ea5d280e8db8efc625b47e49"}, - {file 
= "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ea053cefa008fda40f92aab937fb9f183cf8752e41dbc7bc68917884454c6362"}, - {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:171a4718860790f66d6c2eda1d95dd1edf64f864d2e9f9115840840cf5b5713f"}, - {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5ed7ceca6aba5331ece96c0e328cd52f0dcf942b8895a1ed2642de50800b79d3"}, - {file = "pydantic_core-2.6.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:acafc4368b289a9f291e204d2c4c75908557d4f36bd3ae937914d4529bf62a76"}, - {file = "pydantic_core-2.6.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1aa712ba150d5105814e53cb141412217146fedc22621e9acff9236d77d2a5ef"}, - {file = "pydantic_core-2.6.3-cp39-none-win32.whl", hash = "sha256:44b4f937b992394a2e81a5c5ce716f3dcc1237281e81b80c748b2da6dd5cf29a"}, - {file = "pydantic_core-2.6.3-cp39-none-win_amd64.whl", hash = "sha256:9b33bf9658cb29ac1a517c11e865112316d09687d767d7a0e4a63d5c640d1b17"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:d7050899026e708fb185e174c63ebc2c4ee7a0c17b0a96ebc50e1f76a231c057"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:99faba727727b2e59129c59542284efebbddade4f0ae6a29c8b8d3e1f437beb7"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fa159b902d22b283b680ef52b532b29554ea2a7fc39bf354064751369e9dbd7"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:046af9cfb5384f3684eeb3f58a48698ddab8dd870b4b3f67f825353a14441418"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:930bfe73e665ebce3f0da2c6d64455098aaa67e1a00323c74dc752627879fc67"}, - {file = 
"pydantic_core-2.6.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:85cc4d105747d2aa3c5cf3e37dac50141bff779545ba59a095f4a96b0a460e70"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b25afe9d5c4f60dcbbe2b277a79be114e2e65a16598db8abee2a2dcde24f162b"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:e49ce7dc9f925e1fb010fc3d555250139df61fa6e5a0a95ce356329602c11ea9"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-macosx_10_7_x86_64.whl", hash = "sha256:2dd50d6a1aef0426a1d0199190c6c43ec89812b1f409e7fe44cb0fbf6dfa733c"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6595b0d8c8711e8e1dc389d52648b923b809f68ac1c6f0baa525c6440aa0daa"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ef724a059396751aef71e847178d66ad7fc3fc969a1a40c29f5aac1aa5f8784"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3c8945a105f1589ce8a693753b908815e0748f6279959a4530f6742e1994dcb6"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:c8c6660089a25d45333cb9db56bb9e347241a6d7509838dbbd1931d0e19dbc7f"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:692b4ff5c4e828a38716cfa92667661a39886e71136c97b7dac26edef18767f7"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:f1a5d8f18877474c80b7711d870db0eeef9442691fcdb00adabfc97e183ee0b0"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:3796a6152c545339d3b1652183e786df648ecdf7c4f9347e1d30e6750907f5bb"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:b962700962f6e7a6bd77e5f37320cabac24b4c0f76afeac05e9f93cf0c620014"}, - {file = 
"pydantic_core-2.6.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56ea80269077003eaa59723bac1d8bacd2cd15ae30456f2890811efc1e3d4413"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75c0ebbebae71ed1e385f7dfd9b74c1cff09fed24a6df43d326dd7f12339ec34"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:252851b38bad3bfda47b104ffd077d4f9604a10cb06fe09d020016a25107bf98"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:6656a0ae383d8cd7cc94e91de4e526407b3726049ce8d7939049cbfa426518c8"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:d9140ded382a5b04a1c030b593ed9bf3088243a0a8b7fa9f071a5736498c5483"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:d38bbcef58220f9c81e42c255ef0bf99735d8f11edef69ab0b499da77105158a"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:c9d469204abcca28926cbc28ce98f28e50e488767b084fb3fbdf21af11d3de26"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:48c1ed8b02ffea4d5c9c220eda27af02b8149fe58526359b3c07eb391cb353a2"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b2b1bfed698fa410ab81982f681f5b1996d3d994ae8073286515ac4d165c2e7"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf9d42a71a4d7a7c1f14f629e5c30eac451a6fc81827d2beefd57d014c006c4a"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4292ca56751aebbe63a84bbfc3b5717abb09b14d4b4442cc43fd7c49a1529efd"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:7dc2ce039c7290b4ef64334ec7e6ca6494de6eecc81e21cb4f73b9b39991408c"}, - {file 
= "pydantic_core-2.6.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:615a31b1629e12445c0e9fc8339b41aaa6cc60bd53bf802d5fe3d2c0cda2ae8d"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:1fa1f6312fb84e8c281f32b39affe81984ccd484da6e9d65b3d18c202c666149"}, - {file = "pydantic_core-2.6.3.tar.gz", hash = "sha256:1508f37ba9e3ddc0189e6ff4e2228bd2d3c3a4641cbe8c07177162f76ed696c7"}, + {file = "pydantic_core-2.16.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:75b81e678d1c1ede0785c7f46690621e4c6e63ccd9192af1f0bd9d504bbb6bf4"}, + {file = "pydantic_core-2.16.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9c865a7ee6f93783bd5d781af5a4c43dadc37053a5b42f7d18dc019f8c9d2bd1"}, + {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:162e498303d2b1c036b957a1278fa0899d02b2842f1ff901b6395104c5554a45"}, + {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2f583bd01bbfbff4eaee0868e6fc607efdfcc2b03c1c766b06a707abbc856187"}, + {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b926dd38db1519ed3043a4de50214e0d600d404099c3392f098a7f9d75029ff8"}, + {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:716b542728d4c742353448765aa7cdaa519a7b82f9564130e2b3f6766018c9ec"}, + {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc4ad7f7ee1a13d9cb49d8198cd7d7e3aa93e425f371a68235f784e99741561f"}, + {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bd87f48924f360e5d1c5f770d6155ce0e7d83f7b4e10c2f9ec001c73cf475c99"}, + {file = "pydantic_core-2.16.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0df446663464884297c793874573549229f9eca73b59360878f382a0fc085979"}, + {file = "pydantic_core-2.16.3-cp310-cp310-musllinux_1_1_x86_64.whl", 
hash = "sha256:4df8a199d9f6afc5ae9a65f8f95ee52cae389a8c6b20163762bde0426275b7db"}, + {file = "pydantic_core-2.16.3-cp310-none-win32.whl", hash = "sha256:456855f57b413f077dff513a5a28ed838dbbb15082ba00f80750377eed23d132"}, + {file = "pydantic_core-2.16.3-cp310-none-win_amd64.whl", hash = "sha256:732da3243e1b8d3eab8c6ae23ae6a58548849d2e4a4e03a1924c8ddf71a387cb"}, + {file = "pydantic_core-2.16.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:519ae0312616026bf4cedc0fe459e982734f3ca82ee8c7246c19b650b60a5ee4"}, + {file = "pydantic_core-2.16.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b3992a322a5617ded0a9f23fd06dbc1e4bd7cf39bc4ccf344b10f80af58beacd"}, + {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d62da299c6ecb04df729e4b5c52dc0d53f4f8430b4492b93aa8de1f541c4aac"}, + {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2acca2be4bb2f2147ada8cac612f8a98fc09f41c89f87add7256ad27332c2fda"}, + {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1b662180108c55dfbf1280d865b2d116633d436cfc0bba82323554873967b340"}, + {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e7c6ed0dc9d8e65f24f5824291550139fe6f37fac03788d4580da0d33bc00c97"}, + {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6b1bb0827f56654b4437955555dc3aeeebeddc47c2d7ed575477f082622c49e"}, + {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e56f8186d6210ac7ece503193ec84104da7ceb98f68ce18c07282fcc2452e76f"}, + {file = "pydantic_core-2.16.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:936e5db01dd49476fa8f4383c259b8b1303d5dd5fb34c97de194560698cc2c5e"}, + {file = "pydantic_core-2.16.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:33809aebac276089b78db106ee692bdc9044710e26f24a9a2eaa35a0f9fa70ba"}, + {file = "pydantic_core-2.16.3-cp311-none-win32.whl", hash = "sha256:ded1c35f15c9dea16ead9bffcde9bb5c7c031bff076355dc58dcb1cb436c4721"}, + {file = "pydantic_core-2.16.3-cp311-none-win_amd64.whl", hash = "sha256:d89ca19cdd0dd5f31606a9329e309d4fcbb3df860960acec32630297d61820df"}, + {file = "pydantic_core-2.16.3-cp311-none-win_arm64.whl", hash = "sha256:6162f8d2dc27ba21027f261e4fa26f8bcb3cf9784b7f9499466a311ac284b5b9"}, + {file = "pydantic_core-2.16.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:0f56ae86b60ea987ae8bcd6654a887238fd53d1384f9b222ac457070b7ac4cff"}, + {file = "pydantic_core-2.16.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c9bd22a2a639e26171068f8ebb5400ce2c1bc7d17959f60a3b753ae13c632975"}, + {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4204e773b4b408062960e65468d5346bdfe139247ee5f1ca2a378983e11388a2"}, + {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f651dd19363c632f4abe3480a7c87a9773be27cfe1341aef06e8759599454120"}, + {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aaf09e615a0bf98d406657e0008e4a8701b11481840be7d31755dc9f97c44053"}, + {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8e47755d8152c1ab5b55928ab422a76e2e7b22b5ed8e90a7d584268dd49e9c6b"}, + {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:500960cb3a0543a724a81ba859da816e8cf01b0e6aaeedf2c3775d12ee49cade"}, + {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cf6204fe865da605285c34cf1172879d0314ff267b1c35ff59de7154f35fdc2e"}, + {file = "pydantic_core-2.16.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = 
"sha256:d33dd21f572545649f90c38c227cc8631268ba25c460b5569abebdd0ec5974ca"}, + {file = "pydantic_core-2.16.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:49d5d58abd4b83fb8ce763be7794d09b2f50f10aa65c0f0c1696c677edeb7cbf"}, + {file = "pydantic_core-2.16.3-cp312-none-win32.whl", hash = "sha256:f53aace168a2a10582e570b7736cc5bef12cae9cf21775e3eafac597e8551fbe"}, + {file = "pydantic_core-2.16.3-cp312-none-win_amd64.whl", hash = "sha256:0d32576b1de5a30d9a97f300cc6a3f4694c428d956adbc7e6e2f9cad279e45ed"}, + {file = "pydantic_core-2.16.3-cp312-none-win_arm64.whl", hash = "sha256:ec08be75bb268473677edb83ba71e7e74b43c008e4a7b1907c6d57e940bf34b6"}, + {file = "pydantic_core-2.16.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:b1f6f5938d63c6139860f044e2538baeee6f0b251a1816e7adb6cbce106a1f01"}, + {file = "pydantic_core-2.16.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2a1ef6a36fdbf71538142ed604ad19b82f67b05749512e47f247a6ddd06afdc7"}, + {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:704d35ecc7e9c31d48926150afada60401c55efa3b46cd1ded5a01bdffaf1d48"}, + {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d937653a696465677ed583124b94a4b2d79f5e30b2c46115a68e482c6a591c8a"}, + {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c9803edf8e29bd825f43481f19c37f50d2b01899448273b3a7758441b512acf8"}, + {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:72282ad4892a9fb2da25defeac8c2e84352c108705c972db82ab121d15f14e6d"}, + {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f752826b5b8361193df55afcdf8ca6a57d0232653494ba473630a83ba50d8c9"}, + {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4384a8f68ddb31a0b0c3deae88765f5868a1b9148939c3f4121233314ad5532c"}, + {file = 
"pydantic_core-2.16.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a4b2bf78342c40b3dc830880106f54328928ff03e357935ad26c7128bbd66ce8"}, + {file = "pydantic_core-2.16.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:13dcc4802961b5f843a9385fc821a0b0135e8c07fc3d9949fd49627c1a5e6ae5"}, + {file = "pydantic_core-2.16.3-cp38-none-win32.whl", hash = "sha256:e3e70c94a0c3841e6aa831edab1619ad5c511199be94d0c11ba75fe06efe107a"}, + {file = "pydantic_core-2.16.3-cp38-none-win_amd64.whl", hash = "sha256:ecdf6bf5f578615f2e985a5e1f6572e23aa632c4bd1dc67f8f406d445ac115ed"}, + {file = "pydantic_core-2.16.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:bda1ee3e08252b8d41fa5537413ffdddd58fa73107171a126d3b9ff001b9b820"}, + {file = "pydantic_core-2.16.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:21b888c973e4f26b7a96491c0965a8a312e13be108022ee510248fe379a5fa23"}, + {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be0ec334369316fa73448cc8c982c01e5d2a81c95969d58b8f6e272884df0074"}, + {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b5b6079cc452a7c53dd378c6f881ac528246b3ac9aae0f8eef98498a75657805"}, + {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ee8d5f878dccb6d499ba4d30d757111847b6849ae07acdd1205fffa1fc1253c"}, + {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7233d65d9d651242a68801159763d09e9ec96e8a158dbf118dc090cd77a104c9"}, + {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c6119dc90483a5cb50a1306adb8d52c66e447da88ea44f323e0ae1a5fcb14256"}, + {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:578114bc803a4c1ff9946d977c221e4376620a46cf78da267d946397dc9514a8"}, + {file = "pydantic_core-2.16.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:d8f99b147ff3fcf6b3cc60cb0c39ea443884d5559a30b1481e92495f2310ff2b"}, + {file = "pydantic_core-2.16.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4ac6b4ce1e7283d715c4b729d8f9dab9627586dafce81d9eaa009dd7f25dd972"}, + {file = "pydantic_core-2.16.3-cp39-none-win32.whl", hash = "sha256:e7774b570e61cb998490c5235740d475413a1f6de823169b4cf94e2fe9e9f6b2"}, + {file = "pydantic_core-2.16.3-cp39-none-win_amd64.whl", hash = "sha256:9091632a25b8b87b9a605ec0e61f241c456e9248bfdcf7abdf344fdb169c81cf"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:36fa178aacbc277bc6b62a2c3da95226520da4f4e9e206fdf076484363895d2c"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:dcca5d2bf65c6fb591fff92da03f94cd4f315972f97c21975398bd4bd046854a"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a72fb9963cba4cd5793854fd12f4cfee731e86df140f59ff52a49b3552db241"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b60cc1a081f80a2105a59385b92d82278b15d80ebb3adb200542ae165cd7d183"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cbcc558401de90a746d02ef330c528f2e668c83350f045833543cd57ecead1ad"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:fee427241c2d9fb7192b658190f9f5fd6dfe41e02f3c1489d2ec1e6a5ab1e04a"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f4cb85f693044e0f71f394ff76c98ddc1bc0953e48c061725e540396d5c8a2e1"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b29eeb887aa931c2fcef5aa515d9d176d25006794610c264ddc114c053bf96fe"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a425479ee40ff021f8216c9d07a6a3b54b31c8267c6e17aa88b70d7ebd0e5e5b"}, + 
{file = "pydantic_core-2.16.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:5c5cbc703168d1b7a838668998308018a2718c2130595e8e190220238addc96f"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99b6add4c0b39a513d323d3b93bc173dac663c27b99860dd5bf491b240d26137"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75f76ee558751746d6a38f89d60b6228fa174e5172d143886af0f85aa306fd89"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:00ee1c97b5364b84cb0bd82e9bbf645d5e2871fb8c58059d158412fee2d33d8a"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:287073c66748f624be4cef893ef9174e3eb88fe0b8a78dc22e88eca4bc357ca6"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ed25e1835c00a332cb10c683cd39da96a719ab1dfc08427d476bce41b92531fc"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:86b3d0033580bd6bbe07590152007275bd7af95f98eaa5bd36f3da219dcd93da"}, + {file = "pydantic_core-2.16.3.tar.gz", hash = "sha256:1cac689f80a3abab2d3c0048b29eea5751114054f032a941a32de4c852c59cad"}, ] [package.dependencies] @@ -968,27 +941,27 @@ dev = ["black", "flake8", "pre-commit"] [[package]] name = "pytest-timeout" -version = "2.2.0" +version = "2.3.1" description = "pytest plugin to abort hanging tests" optional = false python-versions = ">=3.7" files = [ - {file = "pytest-timeout-2.2.0.tar.gz", hash = "sha256:3b0b95dabf3cb50bac9ef5ca912fa0cfc286526af17afc806824df20c2f72c90"}, - {file = "pytest_timeout-2.2.0-py3-none-any.whl", hash = "sha256:bde531e096466f49398a59f2dde76fa78429a09a12411466f88a07213e220de2"}, + {file = "pytest-timeout-2.3.1.tar.gz", hash = "sha256:12397729125c6ecbdaca01035b9e5239d4db97352320af155b3f5de1ba5165d9"}, + {file = "pytest_timeout-2.3.1-py3-none-any.whl", hash = 
"sha256:68188cb703edfc6a18fad98dc25a3c61e9f24d644b0b70f33af545219fc7813e"}, ] [package.dependencies] -pytest = ">=5.0.0" +pytest = ">=7.0.0" [[package]] name = "python-dateutil" -version = "2.8.2" +version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" files = [ - {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, - {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, ] [package.dependencies] @@ -1076,13 +1049,13 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "rich" -version = "13.7.0" +version = "13.7.1" description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" optional = false python-versions = ">=3.7.0" files = [ - {file = "rich-13.7.0-py3-none-any.whl", hash = "sha256:6da14c108c4866ee9520bbffa71f6fe3962e193b7da68720583850cd4548e235"}, - {file = "rich-13.7.0.tar.gz", hash = "sha256:5cb5123b5cf9ee70584244246816e9114227e0b98ad9176eede6ad54bf5403fa"}, + {file = "rich-13.7.1-py3-none-any.whl", hash = "sha256:4edbae314f59eb482f54e9e30bf00d33350aaa94f4bfcd4e9e3110e64d0d7222"}, + {file = "rich-13.7.1.tar.gz", hash = "sha256:9be308cb1fe2f1f57d67ce99e95af38a1e2bc71ad9813b0e247cf7ffbcc3a432"}, ] [package.dependencies] @@ -1234,6 +1207,63 @@ docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path 
(>=3.2.0)", "packaging (>=23.2)", "pip (>=19.1)", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff (>=0.2.1)", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.2)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] +[[package]] +name = "shapely" +version = "2.0.3" +description = "Manipulation and analysis of geometric objects" +optional = false +python-versions = ">=3.7" +files = [ + {file = "shapely-2.0.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:af7e9abe180b189431b0f490638281b43b84a33a960620e6b2e8d3e3458b61a1"}, + {file = "shapely-2.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:98040462b36ced9671e266b95c326b97f41290d9d17504a1ee4dc313a7667b9c"}, + {file = "shapely-2.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:71eb736ef2843f23473c6e37f6180f90f0a35d740ab284321548edf4e55d9a52"}, + {file = "shapely-2.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:881eb9dbbb4a6419667e91fcb20313bfc1e67f53dbb392c6840ff04793571ed1"}, + {file = "shapely-2.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f10d2ccf0554fc0e39fad5886c839e47e207f99fdf09547bc687a2330efda35b"}, + {file = "shapely-2.0.3-cp310-cp310-win32.whl", hash = "sha256:6dfdc077a6fcaf74d3eab23a1ace5abc50c8bce56ac7747d25eab582c5a2990e"}, + {file = "shapely-2.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:64c5013dacd2d81b3bb12672098a0b2795c1bf8190cfc2980e380f5ef9d9e4d9"}, + {file = "shapely-2.0.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:56cee3e4e8159d6f2ce32e421445b8e23154fd02a0ac271d6a6c0b266a8e3cce"}, + {file = "shapely-2.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:619232c8276fded09527d2a9fd91a7885ff95c0ff9ecd5e3cb1e34fbb676e2ae"}, + {file = "shapely-2.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b2a7d256db6f5b4b407dc0c98dd1b2fcf1c9c5814af9416e5498d0a2e4307a4b"}, + {file = "shapely-2.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e45f0c8cd4583647db3216d965d49363e6548c300c23fd7e57ce17a03f824034"}, + {file = "shapely-2.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13cb37d3826972a82748a450328fe02a931dcaed10e69a4d83cc20ba021bc85f"}, + {file = "shapely-2.0.3-cp311-cp311-win32.whl", hash = "sha256:9302d7011e3e376d25acd30d2d9e70d315d93f03cc748784af19b00988fc30b1"}, + {file = "shapely-2.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:6b464f2666b13902835f201f50e835f2f153f37741db88f68c7f3b932d3505fa"}, + {file = "shapely-2.0.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:e86e7cb8e331a4850e0c2a8b2d66dc08d7a7b301b8d1d34a13060e3a5b4b3b55"}, + {file = "shapely-2.0.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c91981c99ade980fc49e41a544629751a0ccd769f39794ae913e53b07b2f78b9"}, + {file = "shapely-2.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bd45d456983dc60a42c4db437496d3f08a4201fbf662b69779f535eb969660af"}, + {file = "shapely-2.0.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:882fb1ffc7577e88c1194f4f1757e277dc484ba096a3b94844319873d14b0f2d"}, + {file = "shapely-2.0.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9f2d93bff2ea52fa93245798cddb479766a18510ea9b93a4fb9755c79474889"}, + {file = "shapely-2.0.3-cp312-cp312-win32.whl", hash = "sha256:99abad1fd1303b35d991703432c9481e3242b7b3a393c186cfb02373bf604004"}, + {file = "shapely-2.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:6f555fe3304a1f40398977789bc4fe3c28a11173196df9ece1e15c5bc75a48db"}, + {file = "shapely-2.0.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = 
"sha256:a983cc418c1fa160b7d797cfef0e0c9f8c6d5871e83eae2c5793fce6a837fad9"}, + {file = "shapely-2.0.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18bddb8c327f392189a8d5d6b9a858945722d0bb95ccbd6a077b8e8fc4c7890d"}, + {file = "shapely-2.0.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:442f4dcf1eb58c5a4e3428d88e988ae153f97ab69a9f24e07bf4af8038536325"}, + {file = "shapely-2.0.3-cp37-cp37m-win32.whl", hash = "sha256:31a40b6e3ab00a4fd3a1d44efb2482278642572b8e0451abdc8e0634b787173e"}, + {file = "shapely-2.0.3-cp37-cp37m-win_amd64.whl", hash = "sha256:59b16976c2473fec85ce65cc9239bef97d4205ab3acead4e6cdcc72aee535679"}, + {file = "shapely-2.0.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:705efbce1950a31a55b1daa9c6ae1c34f1296de71ca8427974ec2f27d57554e3"}, + {file = "shapely-2.0.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:601c5c0058a6192df704cb889439f64994708563f57f99574798721e9777a44b"}, + {file = "shapely-2.0.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f24ecbb90a45c962b3b60d8d9a387272ed50dc010bfe605f1d16dfc94772d8a1"}, + {file = "shapely-2.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8c2a2989222c6062f7a0656e16276c01bb308bc7e5d999e54bf4e294ce62e76"}, + {file = "shapely-2.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42bceb9bceb3710a774ce04908fda0f28b291323da2688f928b3f213373b5aee"}, + {file = "shapely-2.0.3-cp38-cp38-win32.whl", hash = "sha256:54d925c9a311e4d109ec25f6a54a8bd92cc03481a34ae1a6a92c1fe6729b7e01"}, + {file = "shapely-2.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:300d203b480a4589adefff4c4af0b13919cd6d760ba3cbb1e56275210f96f654"}, + {file = "shapely-2.0.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:083d026e97b6c1f4a9bd2a9171c7692461092ed5375218170d91705550eecfd5"}, + {file = "shapely-2.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:27b6e1910094d93e9627f2664121e0e35613262fc037051680a08270f6058daf"}, + {file = "shapely-2.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:71b2de56a9e8c0e5920ae5ddb23b923490557ac50cb0b7fa752761bf4851acde"}, + {file = "shapely-2.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d279e56bbb68d218d63f3efc80c819cedcceef0e64efbf058a1df89dc57201b"}, + {file = "shapely-2.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88566d01a30f0453f7d038db46bc83ce125e38e47c5f6bfd4c9c287010e9bf74"}, + {file = "shapely-2.0.3-cp39-cp39-win32.whl", hash = "sha256:58afbba12c42c6ed44c4270bc0e22f3dadff5656d711b0ad335c315e02d04707"}, + {file = "shapely-2.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:5026b30433a70911979d390009261b8c4021ff87c7c3cbd825e62bb2ffa181bc"}, + {file = "shapely-2.0.3.tar.gz", hash = "sha256:4d65d0aa7910af71efa72fd6447e02a8e5dd44da81a983de9d736d6e6ccbe674"}, +] + +[package.dependencies] +numpy = ">=1.14,<2" + +[package.extras] +docs = ["matplotlib", "numpydoc (==1.1.*)", "sphinx", "sphinx-book-theme", "sphinx-remove-toctrees"] +test = ["pytest", "pytest-cov"] + [[package]] name = "shellingham" version = "1.5.4" @@ -1381,4 +1411,4 @@ test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "22fb0b0e7386f5abc1f2f7aa52630ace3c35cbdba9d94e75f0d5a1935f3574e9" +content-hash = "9437634706b27f73a8577b43f479ed8698df60d23b2779ce92b44dfdb531acbd" diff --git a/pyproject.toml b/pyproject.toml index bad04fe..33d2cf3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -55,10 +55,10 @@ python = "^3.10" typer = {extras = ["all"], version = "^0.9.0"} rich = "^13.7.0" -pydantic = "2.3" # bug in post 2.3 https://github.com/pydantic/pydantic/issues/7720 +pydantic = "^2.6.3" # bug in post 2.3 https://github.com/pydantic/pydantic/issues/7720 pydantic-core = "^2" pystac = "^1.9.0" - +shapely = "^2" 
[tool.poetry.group.dev.dependencies] mypy = "^1.0.0" diff --git a/stac_model/paths.py b/stac_model/paths.py deleted file mode 100644 index 7c67400..0000000 --- a/stac_model/paths.py +++ /dev/null @@ -1,49 +0,0 @@ -import re - -from pydantic import AnyUrl, field_validator - - -class S3Path(AnyUrl): - allowed_schemes = {"s3"} - user_required = False - max_length = 1023 - min_length = 8 - - @field_validator("url") - @classmethod - def validate_s3_url(cls, v): - if not v.startswith("s3://"): - raise ValueError("S3 path must start with s3://") - if len(v) < cls.min_length: - raise ValueError("S3 path is too short") - if len(v) > cls.max_length: - raise ValueError("S3 path is too long") - return v - - @field_validator("host") - @classmethod - def validate_bucket_name(cls, v): - if not v: - raise ValueError("Bucket name cannot be empty") - if not 3 <= len(v) <= 63: - raise ValueError("Bucket name must be between 3 and 63 characters") - if not re.match(r"^[a-z0-9.\-]+$", v): - raise ValueError( - "Bucket name can only contain lowercase, numbers, dots, and hyphens" - ) - if v.startswith("-") or v.endswith("-"): - raise ValueError("Bucket name cannot start or end with a hyphen") - if ".." in v: - raise ValueError("Bucket name cannot have consecutive periods") - return v - - @field_validator("path") - @classmethod - def validate_key(cls, v): - if "//" in v: - raise ValueError("Key must not contain double slashes") - if "\\" in v: - raise ValueError("Backslashes are not standard in S3 paths") - if "\t" in v or "\n" in v: - raise ValueError("Key cannot contain tab or newline characters") - return v.strip("/") diff --git a/stac_model/runtime.py b/stac_model/runtime.py index dc11081..b1a564a 100644 --- a/stac_model/runtime.py +++ b/stac_model/runtime.py @@ -3,15 +3,13 @@ from pydantic import AnyUrl, BaseModel, ConfigDict, FilePath -from .paths import S3Path - class Asset(BaseModel): """Information about the model location and other additional file locations. 
Follows the STAC Asset Object spec. """ - href: S3Path | FilePath | AnyUrl | str + href: FilePath | AnyUrl | str title: Optional[str] = None description: Optional[str] = None type: Optional[str] = None From c7c75cacf429b2e04a04fbdad998a40d247eda5d Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Thu, 7 Mar 2024 17:12:10 -0800 Subject: [PATCH 055/112] roles for asset objects --- CHANGELOG.md | 2 -- README.md | 22 ++++++++++------------ 2 files changed, 10 insertions(+), 14 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0795779..3635873 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -29,8 +29,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 # TODO link release here -## [Unreleased] - ### Added - Added example model architecture summary text. diff --git a/README.md b/README.md index 5fac643..5e9a014 100644 --- a/README.md +++ b/README.md @@ -111,18 +111,16 @@ Note: It is common in the machine learning, computer vision, and remote sensing ### Runtime Object -| Field Name | Type | Description | -| ----------------------- | ------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| model_asset | [Asset Object](stac-asset) | **REQUIRED.** Asset object containing URI to the model file. Recommended asset `roles` include `weights` for model weights that need to be loaded by a model definition and `compiled` for models that can be loaded directly without an intermediate model definition. | -| source_code | [Asset Object](stac-asset) | **REQUIRED.** Source code description. Can describe a github repo, zip archive, etc. 
This description should reference the inference function, for example my_package.my_module.predict | -| accelerator | [Accelerator Enum](#accelerator-enum) | **REQUIRED.** The intended computational hardware that runs inference. | -| accelerator_constrained | boolean | **REQUIRED.** True if the intended `accelerator` is the only `accelerator` that can run inference. False if other accelerators, such as amd64 (CPU), can run inference. | -| hardware_summary | string | **REQUIRED.** A high level description of the number of accelerators, specific generation of the `accelerator`, or other relevant inference details. | -| container | [Container](#container) | **RECOMMENDED.** Information to run the model in a container instance. | -| model_commit_hash | string | Hash value pointing to a specific version of the code. | -| batch_size_suggestion | number | A suggested batch size for the accelerator and summarized hardware. | - -For the `model_a` +| Field Name | Type | Description | +| ----------------------- | ------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| model_asset | [Asset Object](stac-asset) | **REQUIRED.** Asset object containing URI to the model file. Recommended asset `roles` include `weights` for model weights that need to be loaded by a model definition and `compiled` for models that can be loaded directly without an intermediate model definition. | +| source_code | [Asset Object](stac-asset) | **REQUIRED.** Source code description. Can describe a github repo, zip archive, etc. The `description` field in the Asset Object should reference the inference function, for example my_package.my_module.predict. 
Recommended asset `roles` include `code` and `metadata`, since the source code asset might also refer to more detailed metadata than this spec captures. | +| accelerator | [Accelerator Enum](#accelerator-enum) | **REQUIRED.** The intended computational hardware that runs inference. | +| accelerator_constrained | boolean | **REQUIRED.** True if the intended `accelerator` is the only `accelerator` that can run inference. False if other accelerators, such as amd64 (CPU), can run inference. | +| hardware_summary | string | **REQUIRED.** A high level description of the number of accelerators, specific generation of the `accelerator`, or other relevant inference details. | +| container | [Container](#container) | **RECOMMENDED.** Information to run the model in a container instance. | +| model_commit_hash | string | Hash value pointing to a specific version of the code. | +| batch_size_suggestion | number | A suggested batch size for the accelerator and summarized hardware. | #### Accelerator Enum From d091c2ee91da70a53176b93a95751c544366d21e Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Thu, 7 Mar 2024 17:19:47 -0800 Subject: [PATCH 056/112] specify how to use commit hash and add to example --- README.md | 2 +- stac_model/examples.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 5e9a014..3333704 100644 --- a/README.md +++ b/README.md @@ -119,7 +119,7 @@ Note: It is common in the machine learning, computer vision, and remote sensing | accelerator_constrained | boolean | **REQUIRED.** True if the intended `accelerator` is the only `accelerator` that can run inference. False if other accelerators, such as amd64 (CPU), can run inference. | | hardware_summary | string | **REQUIRED.** A high level description of the number of accelerators, specific generation of the `accelerator`, or other relevant inference details. 
| | container | [Container](#container) | **RECOMMENDED.** Information to run the model in a container instance. | -| model_commit_hash | string | Hash value pointing to a specific version of the code. | +| commit_hash | string | Hash value pointing to a specific version of the code used to run model inference. If this is supplied, `source code` should also be supplied and the commit hash must refer to a Git repository linked or described in the `source_code` [Asset Object](stac-asset). | | batch_size_suggestion | number | A suggested batch size for the accelerator and summarized hardware. | #### Accelerator Enum diff --git a/stac_model/examples.py b/stac_model/examples.py index 6831222..9687bd5 100644 --- a/stac_model/examples.py +++ b/stac_model/examples.py @@ -88,6 +88,7 @@ def eurosat_resnet(): accelerator="cuda", accelerator_constrained=False, hardware_summary="Unknown", + commit_hash="61efd2e2c4df7ebe3bd03002ebbaeaa3cfe9885a", ) result_array = ResultArray( shape=[-1, 10], dim_names=["batch", "class"], data_type="float32" From 9e58fdf01c5c1f63b4f2de885372d0301659fa95 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Thu, 7 Mar 2024 18:06:57 -0800 Subject: [PATCH 057/112] fields reordered so datetimes are together --- README.md | 2 +- best-practices.md | 2 +- examples/example.json | 20 ++++++++++---------- stac_model/input.py | 2 +- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index 3333704..7bf6ddd 100644 --- a/README.md +++ b/README.md @@ -104,7 +104,7 @@ A deviation from the [STAC 1.1 Bands Object](https://github.com/radiantearth/sta 
|------------|-----------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--| | shape | [integer] | **REQUIRED.** Shape of the input n-dimensional array ($N \times C \times H \times W$), including the batch size dimension. The batch size dimension must either be greater than 0 or -1 to indicate an unspecified batch dimension size. | | | dim_order | string | **REQUIRED.** How the above dimensions are ordered within the `shape`. "bhw", "bchw", "bthw", "btchw" are valid orderings where b=batch, c=channel, t=time, h=height, w=width. | | -| data_type | enum | **REQUIRED.** The data type of values in the n-dimensional array. For model inputs, this should be the data type of the processed input supplied to the model inference function, not the data type of the source bands. Use one of the [common metadata data types](https://github.com/radiantearth/stac-spec/blob/f9b3c59ba810541c9da70c5f8d39635f8cba7bcd/item-spec/common-metadata.md#data-types). | | +| data_type | enum | **REQUIRED.** The data type of values in the n-dimensional array. For model inputs, this should be the data type of the processed input supplied to the model inference function, not the data type of the source bands. Use one of the [common metadata data types](https://github.com/stac-extensions/raster?tab=readme-ov-file#data-types). | | Note: It is common in the machine learning, computer vision, and remote sensing communities to refer to rasters that are inputs to a model as arrays or tensors. Array Objects are distinct from the JSON array type used to represent lists of values. 
diff --git a/best-practices.md b/best-practices.md index 8d6023c..9450d69 100644 --- a/best-practices.md +++ b/best-practices.md @@ -14,7 +14,7 @@ We recommend using at least the `processing:lineage` and `processing:level` fiel For example: -``` +```json "processing:lineage": "GRD Post Processing", "processing:level": "L1C", "processing:facility": "Copernicus S1 Core Ground Segment - DPA", diff --git a/examples/example.json b/examples/example.json index 27b52a7..41a84f2 100644 --- a/examples/example.json +++ b/examples/example.json @@ -5,11 +5,12 @@ "properties": { "start_datetime": "1900-01-01", "end_datetime": null, + "datetime": null, "mlm:name": "Resnet-18 Sentinel-2 ALL MOCO", "mlm:task": "classification", "mlm:framework": "pytorch", "mlm:framework_version": "2.1.2+cu121", - "mlm:file_size": 1, + "mlm:file_size": 43000000, "mlm:memory_size": 1, "mlm:input": [ { @@ -80,7 +81,7 @@ "valid_percent": null }, "norm_with_clip_values": null, - "pre_processing_function": "https://github.com/microsoft/torchgeo/blob/545abe8326efc2848feae69d0212a15faba3eb00/torchgeo/datamodules/eurosat.py" + "pre_processing_function": "torchgeo.datamodules.eurosat.EuroSATDataModule.collate_fn" } ], "mlm:output": [ @@ -204,16 +205,15 @@ "accelerator_constrained": false, "hardware_summary": "Unknown", "container": null, - "commit_hash": null, + "commit_hash": "61efd2e2c4df7ebe3bd03002ebbaeaa3cfe9885a", "batch_size_suggestion": null } ], "mlm:total_parameters": 11700000, "mlm:pretrained_source": "EuroSat Sentinel-2", - "mlm:summary": "Sourced from torchgeo python library,identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO", - "datetime": null + "mlm:summary": "Sourced from torchgeo python library,identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO" }, - "geometry": null, + "geometry": "{\n \"type\": \"Polygon\",\n \"coordinates\": [\n [\n [\n -7.882190080512502,\n 37.13739173208318\n ],\n [\n -7.882190080512502,\n 58.21798141355221\n ],\n [\n 27.911651652899923,\n 58.21798141355221\n ],\n [\n 
27.911651652899923,\n 37.13739173208318\n ],\n [\n -7.882190080512502,\n 37.13739173208318\n ]\n ]\n ]\n}", "links": [ { "rel": "derived_from", @@ -223,10 +223,10 @@ ], "assets": {}, "bbox": [ - -90, - -180, - 90, - 180 + -7.882190080512502, + 37.13739173208318, + 27.911651652899923, + 58.21798141355221 ], "stac_extensions": [ "https://raw.githubusercontent.com/crim-ca/dlm-extension/main/json-schema/schema.json" diff --git a/stac_model/input.py b/stac_model/input.py index 318d766..5e315c2 100644 --- a/stac_model/input.py +++ b/stac_model/input.py @@ -8,7 +8,7 @@ class InputArray(BaseModel): dim_order: Literal["bhw", "bchw", "bthw", "btchw"] data_type: str = Field( ..., - pattern="^(uint8|uint16|uint32|uint64|int8|int16|int32|int64|float16|float32|float64)$", + pattern="^(uint8|uint16|uint32|uint64|int8|int16|int32|int64|float16|float32|float64|cint16|cint32|cfloat32|cfloat64|other)$", ) From f9f66d633e574c4c1a0b4e790ec5367fda583019 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Thu, 7 Mar 2024 18:26:49 -0800 Subject: [PATCH 058/112] remove geometry models --- stac_model/geometry_models.py | 39 ----------------------------------- stac_model/output.py | 2 -- 2 files changed, 41 deletions(-) delete mode 100644 stac_model/geometry_models.py diff --git a/stac_model/geometry_models.py b/stac_model/geometry_models.py deleted file mode 100644 index 125e08b..0000000 --- a/stac_model/geometry_models.py +++ /dev/null @@ -1,39 +0,0 @@ -from typing import List, Literal, Union - -from pydantic import ( - BaseModel, -) - - -class Geometry(BaseModel): - type: str - coordinates: List - - -class GeoJSONPoint(Geometry): - type: Literal["Point"] - coordinates: List[float] - - -class GeoJSONMultiPoint(Geometry): - type: Literal["MultiPoint"] - coordinates: List[List[float]] - - -class GeoJSONPolygon(Geometry): - type: Literal["Polygon"] - coordinates: List[List[List[float]]] - - -class GeoJSONMultiPolygon(Geometry): - type: Literal["MultiPolygon"] - coordinates: 
List[List[List[List[float]]]] - - -AnyGeometry = Union[ - Geometry, - GeoJSONPoint, - GeoJSONMultiPoint, - GeoJSONPolygon, - GeoJSONMultiPolygon, -] diff --git a/stac_model/output.py b/stac_model/output.py index 543c26b..11a7b40 100644 --- a/stac_model/output.py +++ b/stac_model/output.py @@ -26,7 +26,6 @@ class ResultArray(BaseModel): pattern="^(uint8|uint16|uint32|uint64|int8|int16|int32|int64|float16|float32|float64)$", ) - class ClassObject(BaseModel): value: int name: str @@ -35,7 +34,6 @@ class ClassObject(BaseModel): color_hint: Optional[str] = None nodata: Optional[bool] = False - class ModelOutput(BaseModel): task: TaskEnum result_array: Optional[List[ResultArray]] = None From 406279c67b43b409c18f3df9fcf4ed7514368888 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Sat, 9 Mar 2024 16:37:09 -0800 Subject: [PATCH 059/112] add roles --- examples/example.json | 14 ++++++++------ stac_model/examples.py | 4 ++-- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/examples/example.json b/examples/example.json index 41a84f2..21672ed 100644 --- a/examples/example.json +++ b/examples/example.json @@ -5,7 +5,6 @@ "properties": { "start_datetime": "1900-01-01", "end_datetime": null, - "datetime": null, "mlm:name": "Resnet-18 Sentinel-2 ALL MOCO", "mlm:task": "classification", "mlm:framework": "pytorch", @@ -189,10 +188,12 @@ { "asset": { "href": "https://huggingface.co/torchgeo/resnet18_sentinel2_all_moco/resolve/main/resnet18_sentinel2_all_moco-59bfdff9.pth", - "title": null, - "description": null, - "type": null, - "roles": null + "title": "Pytorch weights checkpoint", + "description": "A Resnet-18 classification model trained on normalized Sentinel-2 imagery with Eurosat landcover labels with torchgeo", + "type": ".pth", + "roles": [ + "weights" + ] }, "source_code": { "href": "https://github.com/microsoft/torchgeo/blob/61efd2e2c4df7ebe3bd03002ebbaeaa3cfe9885a/torchgeo/models/resnet.py#L207", @@ -211,7 +212,8 @@ ], "mlm:total_parameters": 11700000, 
"mlm:pretrained_source": "EuroSat Sentinel-2", - "mlm:summary": "Sourced from torchgeo python library,identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO" + "mlm:summary": "Sourced from torchgeo python library,identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO", + "datetime": null }, "geometry": "{\n \"type\": \"Polygon\",\n \"coordinates\": [\n [\n [\n -7.882190080512502,\n 37.13739173208318\n ],\n [\n -7.882190080512502,\n 58.21798141355221\n ],\n [\n 27.911651652899923,\n 58.21798141355221\n ],\n [\n 27.911651652899923,\n 37.13739173208318\n ],\n [\n -7.882190080512502,\n 37.13739173208318\n ]\n ]\n ]\n}", "links": [ diff --git a/stac_model/examples.py b/stac_model/examples.py index 9687bd5..106b76f 100644 --- a/stac_model/examples.py +++ b/stac_model/examples.py @@ -79,8 +79,8 @@ def eurosat_resnet(): runtime = Runtime( framework="torch", version="2.1.2+cu121", - asset=Asset( - href="https://huggingface.co/torchgeo/resnet18_sentinel2_all_moco/resolve/main/resnet18_sentinel2_all_moco-59bfdff9.pth" # noqa: E501 + asset=Asset(title = "Pytorch weights checkpoint", description="A Resnet-18 classification model trained on normalized Sentinel-2 imagery with Eurosat landcover labels with torchgeo", # noqa: E501 + type=".pth", roles=["weights"], href="https://huggingface.co/torchgeo/resnet18_sentinel2_all_moco/resolve/main/resnet18_sentinel2_all_moco-59bfdff9.pth" # noqa: E501 ), source_code=Asset( href="https://github.com/microsoft/torchgeo/blob/61efd2e2c4df7ebe3bd03002ebbaeaa3cfe9885a/torchgeo/models/resnet.py#L207" # noqa: E501 From 90a63d48f4112c75243f7e751198a81695d61f46 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Wed, 20 Mar 2024 10:04:46 -0700 Subject: [PATCH 060/112] changelog updates --- CHANGELOG.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3635873..759fda6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,7 +17,8 @@ and this project adheres to [Semantic 
Versioning](https://semver.org/spec/v2.0.0 - flexible [class map object](./README.md#class-map-object) and [parameters object](./README.md#parameters-object) to handle aspects of models that vary substantially in number ### Changed -- replaced normalization:mean, etc. with [statistics](./README.md#bands-and-statistics) from STAC 1.1 common metadata +- reorganized `dlm:architecture` nested fields to exist at the top level of properties as `mlm:name`, `mlm:summary` and so on to provide STAC API search capabilities. +- replaced `normalization:mean`, etc. with [statistics](./README.md#bands-and-statistics) from STAC 1.1 common metadata - added `pydantic` models for internal schema objects in `stac_model` package and published to PYPI - specified [rel_type](./README.md#relation-types) to be `derived_from` and specify how model item or collection json should be named @@ -25,7 +26,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - ### Removed -- Data Object, replaced with [Model Input Object](./README.md#model-input-object) that uses the `name` field from the[common metadata band object](https://github.com/radiantearth/stac-spec/blob/f9b3c59ba810541c9da70c5f8d39635f8cba7bcd/item-spec/common-metadata.md#bands) which also records `data_type` and `nodata` type +- Data Object, replaced with [Model Input Object](./README.md#model-input-object) that uses the `name` field from the [common metadata band object](https://github.com/radiantearth/stac-spec/blob/f9b3c59ba810541c9da70c5f8d39635f8cba7bcd/item-spec/common-metadata.md#bands) which also records `data_type` and `nodata` type # TODO link release here From 42bbebb90d7e1964bd6dd0ddc3cf3f024d38d662 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Wed, 20 Mar 2024 10:44:41 -0700 Subject: [PATCH 061/112] address feedback on formatting and descriptions --- README.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 7bf6ddd..8cdef69 
100644 --- a/README.md +++ b/README.md @@ -57,7 +57,7 @@ Check the original technical report for an earlier version of the Model Extensio | mlm:total_parameters | integer | Total number of model parameters, including trainable and non-trainable parameters. | | mlm:pretrained_source | string | The source of the pretraining. Can refer to popular pretraining datasets by name (i.e. Imagenet) or less known datasets by URL and description. | | mlm:summary | string | Text summary of the model and it's purpose. | -| mlm:parameters | [Parameters Object](#params-object) | Mapping with names for the parameters and their values. Some models may take additional scalars, tuples, and other non-tensor inputs like text during inference (Segment Anything). The field should be specified here if parameters apply to all Model Input Objects. If each Model Input Object has parameters, specify parameters in that object. | +| mlm:parameters | [Parameters Object](#parameters-object) | Mapping with names for the parameters and their values. Some models may take additional scalars, tuples, and other non-tensor inputs like text during inference (Segment Anything). The field should be specified here if parameters apply to all Model Input Objects. If each Model Input Object has parameters, specify parameters in that object. | In addition, fields from the following extensions must be imported in the item: - [Scientific Extension Specification][stac-ext-sci] to describe relevant publications. @@ -74,12 +74,12 @@ In addition, fields from the following extensions must be imported in the item: | name | string | **REQUIRED.** Informative name of the input variable. Example "RGB Time Series" | | | bands | [string] | **REQUIRED.** The names of the raster bands used to train or fine-tune the model, which may be all or a subset of bands available in a STAC Item's [Band Object](#bands-and-statistics). 
| | | input_array | [Array Object](#feature-array-object) | **REQUIRED.** The N-dimensional array object that describes the shape, dimension ordering, and data type. | | -| parameters | [Parameters Object](#params-object) | Mapping with names for the parameters and their values. Some models may take additional scalars, tuples, and other non-tensor inputs like text. | | +| parameters | [Parameters Object](#parameters-object) | Mapping with names for the parameters and their values. Some models may take additional scalars, tuples, and other non-tensor inputs like text. | | | norm_by_channel | boolean | Whether to normalize each channel by channel-wise statistics or to normalize by dataset statistics. If True, use an array of [Statistics Objects](#bands-and-statistics) that is ordered like the `bands` field in this object. | | -| norm_type | string | Normalization method. Select one option from "min_max", "z_score", "max_norm", "mean_norm", "unit_variance", "norm_with_clip", "none" | | -| resize_type | string | High-level descriptor of the rescaling method to change image shape. Select one option from "crop", "pad", "interpolation", "none". If your rescaling method combines more than one of these operations, provide the name of the operation instead | | +| norm_type | string | Normalization method. Select one option from `min_max`, `z_score`, `max_norm`, `mean_norm`, `unit_variance`, `norm_with_clip`, `none` | | +| resize_type | string | High-level descriptor of the rescaling method to change image shape. Select one option from `crop`, `pad`, `interpolation`, `none`. If your rescaling method combines more than one of these operations, provide the name of the operation instead | | | statistics | [Statistics Object](stac-statistics) `\|` [[Statistics Object](stac-statistics)] | Dataset statistics for the training dataset used to normalize the inputs. 
| | -| norm_with_clip_values | [integer] | If norm_type = "norm_with_clip" this array supplies a value that is less than the band maximum. The array must be the same length as "bands", each value is used to divide each band before clipping values between 0 and 1. | +| norm_with_clip_values | [integer] | If `norm_type = "norm_with_clip"` this array supplies a value that is less than the band maximum. The array must be the same length as "bands", each value is used to divide each band before clipping values between 0 and 1. | | pre_processing_function | string | A url to the preprocessing function where normalization and rescaling takes place, and any other significant operations. Or, instead, the function code path, for example: `my_python_module_name:my_processing_function` | | #### Parameters Object @@ -88,7 +88,7 @@ In addition, fields from the following extensions must be imported in the item: |---------------------------------------|------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | *parameter names depend on the model* | number `\|` string `\|` boolean `\|` array | The number of fields and their names depend on the model. Values should not be n-dimensional array inputs. If the model input can be represented as an n-dimensional array, it should instead be supplied as another [model input object](#model-input-object). | -The parameters field can either be specified in the [Model Input Object](#model-input-object) if they are associated with a specific input or as an [Item or Collection](#item-properties-and-collection-fields) field if the parameters are supplied without relation to a specific model input. +The `Parameters Object` is simply a user defined mapping of parameters to parameter values. 
This is meant to capture model inputs that can't be represented as n-dimensional arrays/tensors. This includes inputs like scalars, text, and booleans. The `parameters` field can either be specified in the [Model Input Object](#model-input-object) if they are associated with a specific input or as an [Item or Collection](#item-properties-and-collection-fields) field if the parameters are supplied without relation to a specific model input. For example: the [Segment Anything](https://ai.meta.com/blog/segment-anything-foundation-model-image-segmentation/) foundational model accepts a label integer for each image input. #### Bands and Statistics @@ -103,7 +103,7 @@ A deviation from the [STAC 1.1 Bands Object](https://github.com/radiantearth/sta | Field Name | Type | Description | | |------------|-----------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--| | shape | [integer] | **REQUIRED.** Shape of the input n-dimensional array ($N \times C \times H \times W$), including the batch size dimension. The batch size dimension must either be greater than 0 or -1 to indicate an unspecified batch dimension size. | | -| dim_order | string | **REQUIRED.** How the above dimensions are ordered within the `shape`. "bhw", "bchw", "bthw", "btchw" are valid orderings where b=batch, c=channel, t=time, h=height, w=width. | | +| dim_order | string | **REQUIRED.** How the above dimensions are ordered within the `shape`. `bhw`, `bchw`, `bthw`, `btchw` are valid orderings where `b`=batch, `c`=channel, `t`=time, `h`=height, w=width. | | | data_type | enum | **REQUIRED.** The data type of values in the n-dimensional array. 
For model inputs, this should be the data type of the processed input supplied to the model inference function, not the data type of the source bands. Use one of the [common metadata data types](https://github.com/stac-extensions/raster?tab=readme-ov-file#data-types). | | Note: It is common in the machine learning, computer vision, and remote sensing communities to refer to rasters that are inputs to a model as arrays or tensors. Array Objects are distinct from the JSON array type used to represent lists of values. From 3429deae56a83b9ed131c74a15794518d5452de0 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Wed, 20 Mar 2024 11:01:39 -0700 Subject: [PATCH 062/112] reorg runtime fields upward and remove runtime object --- README.md | 141 ++++++++++++++++++++++++++---------------------------- 1 file changed, 68 insertions(+), 73 deletions(-) diff --git a/README.md b/README.md index 8cdef69..66d01c2 100644 --- a/README.md +++ b/README.md @@ -43,21 +43,28 @@ Check the original technical report for an earlier version of the Model Extensio ## Item Properties and Collection Fields -| Field Name | Type | Description | -|-----------------------|-----------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| mlm:name | string | **REQUIRED.** A unique name for the model. This should include but be distinct from simply naming the model architecture. If there is a publication or other published work related to the model, use the official name of the model. | -| mlm:task | [Task Enum](#task-enum) | **REQUIRED.** Specifies the primary Machine Learning task for which the output can be used for. 
If there are multi-modal outputs, specify the primary task and specify each task in the [Model Output Object](#model-output-object). | -| mlm:framework | string | **REQUIRED.** Framework used to train the model (ex: PyTorch, TensorFlow). | -| mlm:framework_version | string | **REQUIRED.** The `framework` library version. Some models require a specific version of the machine learning `framework` to run. | -| mlm:file_size | integer | **REQUIRED.** The size on disk of the model artifact (bytes). | -| mlm:memory_size | integer | **REQUIRED.** The in-memory size of the model on the accelerator during inference (bytes). | -| mlm:input | [[Model Input Object](#model-input-object)] | **REQUIRED.** Describes the transformation between the EO data and the model input. | -| mlm:output | [[Model Output Object](#model-output-object)] | **REQUIRED.** Describes each model output and how to interpret it. | -| mlm:runtime | [[Runtime Object](#runtime-object)] | **REQUIRED.** Describes the runtime environment(s) to run inference with the model asset(s). | -| mlm:total_parameters | integer | Total number of model parameters, including trainable and non-trainable parameters. | -| mlm:pretrained_source | string | The source of the pretraining. Can refer to popular pretraining datasets by name (i.e. Imagenet) or less known datasets by URL and description. | -| mlm:summary | string | Text summary of the model and it's purpose. | -| mlm:parameters | [Parameters Object](#parameters-object) | Mapping with names for the parameters and their values. Some models may take additional scalars, tuples, and other non-tensor inputs like text during inference (Segment Anything). The field should be specified here if parameters apply to all Model Input Objects. If each Model Input Object has parameters, specify parameters in that object. 
| +| Field Name | Type | Description | +| --------------------------- | --------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| mlm:name | string | **REQUIRED.** A unique name for the model. This should include but be distinct from simply naming the model architecture. If there is a publication or other published work related to the model, use the official name of the model. | +| mlm:task | [Task Enum](#task-enum) | **REQUIRED.** Specifies the primary Machine Learning task for which the output can be used for. If there are multi-modal outputs, specify the primary task and specify each task in the [Model Output Object](#model-output-object). | +| mlm:framework | string | **REQUIRED.** Framework used to train the model (ex: PyTorch, TensorFlow). | +| mlm:framework_version | string | **REQUIRED.** The `framework` library version. Some models require a specific version of the machine learning `framework` to run. | +| mlm:file_size | integer | **REQUIRED.** The size on disk of the model artifact (bytes). | +| mlm:memory_size | integer | **REQUIRED.** The in-memory size of the model on the accelerator during inference (bytes). | +| mlm:input | [[Model Input Object](#model-input-object)] | **REQUIRED.** Describes the transformation between the EO data and the model input. | +| mlm:output | [[Model Output Object](#model-output-object)] | **REQUIRED.** Describes each model output and how to interpret it. | +| mlm:accelerator | [Accelerator Enum](#accelerator-enum) | **REQUIRED.** The intended computational hardware that runs inference. 
| +| mlm:accelerator_constrained | boolean | **REQUIRED.** True if the intended `accelerator` is the only `accelerator` that can run inference. False if other accelerators, such as amd64 (CPU), can run inference. | +| mlm:hardware_summary | string | **REQUIRED.** A high level description of the number of accelerators, specific generation of the `accelerator`, or other relevant inference details. | +| mlm:model | [Asset Object](stac-asset) | **REQUIRED.** Asset object containing URI to the model file. Recommended asset `roles` include `weights` for model weights that need to be loaded by a model definition and `compiled` for models that can be loaded directly without an intermediate model definition. | +| mlm:source_code | [Asset Object](stac-asset) | **REQUIRED.** Source code description. Can describe a github repo, zip archive, etc. The `description` field in the Asset Object should reference the inference function, for example my_package.my_module.predict. Recommended asset `roles` include `code` and `metadata`, since the source code asset might also refer to more detailed metadata than this spec captures. | +| mlm:container | [Asset Object](stac-asset) | **RECOMMENDED.** Information to run the model in a container with URI to the container. | +| mlm:total_parameters | integer | Total number of model parameters, including trainable and non-trainable parameters. | +| mlm:pretrained_source | string | The source of the pretraining. Can refer to popular pretraining datasets by name (i.e. Imagenet) or less known datasets by URL and description. | +| mlm:summary | string | Text summary of the model and it's purpose. | +| commit_hash | string | Hash value pointing to a specific version of the code used to run model inference. If this is supplied, `source code` should also be supplied and the commit hash must refer to a Git repository linked or described in the `source_code` [Asset Object](stac-asset). 
| +| batch_size_suggestion | number | A suggested batch size for the accelerator and summarized hardware. | +| mlm:parameters | [Parameters Object](#parameters-object) | Mapping with names for the parameters and their values. Some models may take additional scalars, tuples, and other non-tensor inputs like text during inference (Segment Anything). The field should be specified here if parameters apply to all Model Input Objects. If each Model Input Object has parameters, specify parameters in that object. | In addition, fields from the following extensions must be imported in the item: - [Scientific Extension Specification][stac-ext-sci] to describe relevant publications. @@ -66,21 +73,36 @@ In addition, fields from the following extensions must be imported in the item: [stac-ext-sci]: https://github.com/radiantearth/stac-spec/tree/v1.0.0-beta.2/extensions/scientific/README.md [stac-ext-ver]: https://github.com/radiantearth/stac-spec/tree/v1.0.0-beta.2/extensions/version/README.md +#### Accelerator Enum + +It is recommended to define `accelerator` with one of the following values: + +- `amd64` models compatible with AMD or Intel CPUs (no hardware specific optimizations) +- `cuda` models compatible with NVIDIA GPUs +- `xla` models compiled with XLA. models trained on TPUs are typically compiled with XLA. 
+- `amd-rocm` models trained on AMD GPUs +- `intel-ipex-cpu` for models optimized with IPEX for Intel CPUs +- `intel-ipex-gpu` for models optimized with IPEX for Intel GPUs +- `macos-arm` for models trained on Apple Silicon + +[stac-asset]: https://github.com/radiantearth/stac-spec/blob/master/collection-spec/collection-spec.md#asset-object + ### Model Input Object -| Field Name | Type | Description | | -|-------------------------|----------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---| -| name | string | **REQUIRED.** Informative name of the input variable. Example "RGB Time Series" | | -| bands | [string] | **REQUIRED.** The names of the raster bands used to train or fine-tune the model, which may be all or a subset of bands available in a STAC Item's [Band Object](#bands-and-statistics). | | -| input_array | [Array Object](#feature-array-object) | **REQUIRED.** The N-dimensional array object that describes the shape, dimension ordering, and data type. | | -| parameters | [Parameters Object](#parameters-object) | Mapping with names for the parameters and their values. Some models may take additional scalars, tuples, and other non-tensor inputs like text. | | -| norm_by_channel | boolean | Whether to normalize each channel by channel-wise statistics or to normalize by dataset statistics. If True, use an array of [Statistics Objects](#bands-and-statistics) that is ordered like the `bands` field in this object. | | -| norm_type | string | Normalization method. Select one option from `min_max`, `z_score`, `max_norm`, `mean_norm`, `unit_variance`, `norm_with_clip`, `none` | | -| resize_type | string | High-level descriptor of the rescaling method to change image shape. 
Select one option from `crop`, `pad`, `interpolation`, `none`. If your rescaling method combines more than one of these operations, provide the name of the operation instead | | -| statistics | [Statistics Object](stac-statistics) `\|` [[Statistics Object](stac-statistics)] | Dataset statistics for the training dataset used to normalize the inputs. | | -| norm_with_clip_values | [integer] | If `norm_type = "norm_with_clip"` this array supplies a value that is less than the band maximum. The array must be the same length as "bands", each value is used to divide each band before clipping values between 0 and 1. | -| pre_processing_function | string | A url to the preprocessing function where normalization and rescaling takes place, and any other significant operations. Or, instead, the function code path, for example: `my_python_module_name:my_processing_function` | | +| Field Name | Type | Description | | +| ----------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --- | +| name | string | **REQUIRED.** Informative name of the input variable. Example "RGB Time Series" | | +| bands | [string] | **REQUIRED.** The names of the raster bands used to train or fine-tune the model, which may be all or a subset of bands available in a STAC Item's [Band Object](#bands-and-statistics). | | +| input_array | [Array Object](#feature-array-object) | **REQUIRED.** The N-dimensional array object that describes the shape, dimension ordering, and data type. | | +| parameters | [Parameters Object](#parameters-object) | Mapping with names for the parameters and their values. Some models may take additional scalars, tuples, and other non-tensor inputs like text. 
| | +| norm_by_channel | boolean | Whether to normalize each channel by channel-wise statistics or to normalize by dataset statistics. If True, use an array of [Statistics Objects](#bands-and-statistics) that is ordered like the `bands` field in this object. | | +| norm_type | string | Normalization method. Select one option from `min_max`, `z_score`, `max_norm`, `mean_norm`, `unit_variance`, `norm_with_clip`, `none` | | +| resize_type | string | High-level descriptor of the rescaling method to change image shape. Select one option from `crop`, `pad`, `interpolation`, `none`. If your rescaling method combines more than one of these operations, provide the name of the operation instead | | +| statistics | [Statistics Object](stac-statistics) `\|` [[Statistics Object](stac-statistics)] | Dataset statistics for the training dataset used to normalize the inputs. | | +| norm_with_clip_values | [integer] | If `norm_type = "norm_with_clip"` this array supplies a value that is less than the band maximum. The array must be the same length as "bands", each value is used to divide each band before clipping values between 0 and 1. | +| pre_processing_function | string | A url to the preprocessing function where normalization and rescaling takes place, and any other significant operations. 
Or, instead, the function code path, for example: `my_python_module_name:my_processing_function` | | + #### Parameters Object @@ -88,7 +110,7 @@ In addition, fields from the following extensions must be imported in the item: |---------------------------------------|------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | *parameter names depend on the model* | number `\|` string `\|` boolean `\|` array | The number of fields and their names depend on the model. Values should not be n-dimensional array inputs. If the model input can be represented as an n-dimensional array, it should instead be supplied as another [model input object](#model-input-object). | -The `Parameters Object` is simply a user defined mapping of parameters to parameter values. This is meant to capture model inputs that can't be represented as n-dimensional arrays/tensors. This includes inputs like scalars, text, and booleans. The `parameters` field can either be specified in the [Model Input Object](#model-input-object) if they are associated with a specific input or as an [Item or Collection](#item-properties-and-collection-fields) field if the parameters are supplied without relation to a specific model input. For example: the [Segment Anything](https://ai.meta.com/blog/segment-anything-foundation-model-image-segmentation/) foundational model accepts a label integer for each image input. +The `Parameters Object` is a user defined mapping of parameters to parameter values. This is meant to capture model inputs that can't be represented as n-dimensional arrays/tensors. This includes inputs like scalars, text, and booleans. 
The `parameters` field can either be specified in the [Model Input Object](#model-input-object) if they are associated with a specific input or as an [Item or Collection](#item-properties-and-collection-fields) field if the parameters are supplied without relation to a specific model input. For example: the [Segment Anything](https://ai.meta.com/blog/segment-anything-foundation-model-image-segmentation/) foundational model accepts a label integer for each image input. #### Bands and Statistics @@ -100,51 +122,23 @@ A deviation from the [STAC 1.1 Bands Object](https://github.com/radiantearth/sta #### Array Object -| Field Name | Type | Description | | -|------------|-----------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--| -| shape | [integer] | **REQUIRED.** Shape of the input n-dimensional array ($N \times C \times H \times W$), including the batch size dimension. The batch size dimension must either be greater than 0 or -1 to indicate an unspecified batch dimension size. | | -| dim_order | string | **REQUIRED.** How the above dimensions are ordered within the `shape`. `bhw`, `bchw`, `bthw`, `btchw` are valid orderings where `b`=batch, `c`=channel, `t`=time, `h`=height, w=width. | | -| data_type | enum | **REQUIRED.** The data type of values in the n-dimensional array. For model inputs, this should be the data type of the processed input supplied to the model inference function, not the data type of the source bands. Use one of the [common metadata data types](https://github.com/stac-extensions/raster?tab=readme-ov-file#data-types). 
| | - +| Field Name | Type | Description | | +| ---------- | --------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --- | +| shape | [integer] | **REQUIRED.** Shape of the input n-dimensional array ($N \times C \times H \times W$), including the batch size dimension. The batch size dimension must either be greater than 0 or -1 to indicate an unspecified batch dimension size. | | +| dim_order | string | **REQUIRED.** How the above dimensions are ordered within the `shape`. `bhw`, `bchw`, `bthw`, `btchw` are valid orderings where `b`=batch, `c`=channel, `t`=time, `h`=height, w=width. | | +| data_type | enum | **REQUIRED.** The data type of values in the n-dimensional array. For model inputs, this should be the data type of the processed input supplied to the model inference function, not the data type of the source bands. Use one of the [common metadata data types](https://github.com/stac-extensions/raster?tab=readme-ov-file#data-types). | | Note: It is common in the machine learning, computer vision, and remote sensing communities to refer to rasters that are inputs to a model as arrays or tensors. Array Objects are distinct from the JSON array type used to represent lists of values. 
+#### Container Asset -### Runtime Object - -| Field Name | Type | Description | -| ----------------------- | ------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| model_asset | [Asset Object](stac-asset) | **REQUIRED.** Asset object containing URI to the model file. Recommended asset `roles` include `weights` for model weights that need to be loaded by a model definition and `compiled` for models that can be loaded directly without an intermediate model definition. | -| source_code | [Asset Object](stac-asset) | **REQUIRED.** Source code description. Can describe a github repo, zip archive, etc. The `description` field in the Asset Object should reference the inference function, for example my_package.my_module.predict. Recommended asset `roles` include `code` and `metadata`, since the source code asset might also refer to more detailed metadata than this spec captures. | -| accelerator | [Accelerator Enum](#accelerator-enum) | **REQUIRED.** The intended computational hardware that runs inference. | -| accelerator_constrained | boolean | **REQUIRED.** True if the intended `accelerator` is the only `accelerator` that can run inference. False if other accelerators, such as amd64 (CPU), can run inference. | -| hardware_summary | string | **REQUIRED.** A high level description of the number of accelerators, specific generation of the `accelerator`, or other relevant inference details. | -| container | [Container](#container) | **RECOMMENDED.** Information to run the model in a container instance. | -| commit_hash | string | Hash value pointing to a specific version of the code used to run model inference. 
If this is supplied, `source code` should also be supplied and the commit hash must refer to a Git repository linked or described in the `source_code` [Asset Object](stac-asset). | -| batch_size_suggestion | number | A suggested batch size for the accelerator and summarized hardware. | - -#### Accelerator Enum - -It is recommended to define `accelerator` with one of the following values: - -- `amd64` models compatible with AMD or Intel CPUs (no hardware specific optimizations) -- `cuda` models compatible with NVIDIA GPUs -- `xla` models compiled with XLA. models trained on TPUs are typically compiled with XLA. -- `amd-rocm` models trained on AMD GPUs -- `intel-ipex-cpu` for models optimized with IPEX for Intel CPUs -- `intel-ipex-gpu` for models optimized with IPEX for Intel GPUs -- `macos-arm` for models trained on Apple Silicon - -[stac-asset]: https://github.com/radiantearth/stac-spec/blob/master/collection-spec/collection-spec.md#asset-object - -#### Container Object - -| Field Name | Type | Description | -|----------------|--------|-------------------------------------------------------| -| container_file | string | Url of the container file (Dockerfile). | -| image_name | string | Name of the container image. | -| tag | string | Tag of the image. | -| working_dir | string | Working directory in the instance that can be mapped. | -| run | string | Running command. | +| Field Name | Type | Description | +| ----------- | ------ | ----------------------------------------------------- | +| title | string | Description of the container. | +| href | string | Url of the container file (Dockerfile). | +| type | string | "application/vnd.oci.image.index.v1+json" | +| roles | string | ["runtime"] | +| working_dir | string | Working directory in the instance that can be mapped. | +| run | string | Running command. 
| If you're unsure how to containerize your model, we suggest starting from the latest official container image for your framework that works with your model and pinning the container version. @@ -173,12 +167,13 @@ You can also use other base images. Pytorch and Tensorflow offer docker images f ### Model Output Object | Field Name | Type | Description | -|--------------------------|-----------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| ------------------------ | --------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | | task | [Task Enum](#task-enum) | **REQUIRED.** Specifies the Machine Learning task for which the output can be used for. | -| result_array | [[Result Array Object](#result-array-object)] | The list of output arrays/tensors from the model. | -| classification:classes | [[Class Object](#class-object)] | A list of class objects adhering to the [Classification extension](https://github.com/stac-extensions/classification). | +| result_array | [[Result Array Object](#result-array-object)] | The list of output arrays/tensors from the model. | +| classification:classes | [[Class Object](#class-object)] | A list of class objects adhering to the [Classification extension](https://github.com/stac-extensions/classification). | | post_processing_function | string | A url to the postprocessing function where normalization, rescaling, and other operations take place.
Or, instead, the function code path, for example: `my_package.my_module.my_processing_function` | + While only `task` is a required field, all fields are recommended for supervised tasks that produce a fixed shape tensor and have output classes. `image-captioning`, `multi-modal`, and `generative` tasks may not return fixed shape tensors or classes. From 2570e621fa65cbec37ce5c58bc1393e53d3dd12e Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Wed, 20 Mar 2024 11:15:36 -0700 Subject: [PATCH 063/112] add asset descriptions --- README.md | 81 ++++++++++++++++++++++++++++++++++--------------------- 1 file changed, 51 insertions(+), 30 deletions(-) diff --git a/README.md b/README.md index 66d01c2..bd90a6e 100644 --- a/README.md +++ b/README.md @@ -73,19 +73,6 @@ In addition, fields from the following extensions must be imported in the item: [stac-ext-sci]: https://github.com/radiantearth/stac-spec/tree/v1.0.0-beta.2/extensions/scientific/README.md [stac-ext-ver]: https://github.com/radiantearth/stac-spec/tree/v1.0.0-beta.2/extensions/version/README.md -#### Accelerator Enum - -It is recommended to define `accelerator` with one of the following values: - -- `amd64` models compatible with AMD or Intel CPUs (no hardware specific optimizations) -- `cuda` models compatible with NVIDIA GPUs -- `xla` models compiled with XLA. models trained on TPUs are typically compiled with XLA. -- `amd-rocm` models trained on AMD GPUs -- `intel-ipex-cpu` for models optimized with IPEX for Intel CPUs -- `intel-ipex-gpu` for models optimized with IPEX for Intel GPUs -- `macos-arm` for models trained on Apple Silicon - -[stac-asset]: https://github.com/radiantearth/stac-spec/blob/master/collection-spec/collection-spec.md#asset-object ### Model Input Object @@ -103,33 +90,41 @@ It is recommended to define `accelerator` with one of the following values: | norm_with_clip_values | [integer] | If `norm_type = "norm_with_clip"` this array supplies a value that is less than the band maximum. 
The array must be the same length as "bands", each value is used to divide each band before clipping values between 0 and 1. | | pre_processing_function | string | A url to the preprocessing function where normalization and rescaling takes place, and any other significant operations. Or, instead, the function code path, for example: `my_python_module_name:my_processing_function` | | +### Accelerator Enum -#### Parameters Object +It is recommended to define `accelerator` with one of the following values: -| Field Name | Type | Description | -|---------------------------------------|------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| *parameter names depend on the model* | number `\|` string `\|` boolean `\|` array | The number of fields and their names depend on the model. Values should not be n-dimensional array inputs. If the model input can be represented as an n-dimensional array, it should instead be supplied as another [model input object](#model-input-object). | +- `amd64` models compatible with AMD or Intel CPUs (no hardware specific optimizations) +- `cuda` models compatible with NVIDIA GPUs +- `xla` models compiled with XLA. models trained on TPUs are typically compiled with XLA. +- `amd-rocm` models trained on AMD GPUs +- `intel-ipex-cpu` for models optimized with IPEX for Intel CPUs +- `intel-ipex-gpu` for models optimized with IPEX for Intel GPUs +- `macos-arm` for models trained on Apple Silicon -The `Parameters Object` is a user defined mapping of parameters to parameter values. This is meant to capture model inputs that can't be represented as n-dimensional arrays/tensors. This includes inputs like scalars, text, and booleans. 
The `parameters` field can either be specified in the [Model Input Object](#model-input-object) if they are associated with a specific input or as an [Item or Collection](#item-properties-and-collection-fields) field if the parameters are supplied without relation to a specific model input. For example: the [Segment Anything](https://ai.meta.com/blog/segment-anything-foundation-model-image-segmentation/) foundational model accepts a label integer for each image input. +[stac-asset]: https://github.com/radiantearth/stac-spec/blob/master/collection-spec/collection-spec.md#asset-object -#### Bands and Statistics +### mlm:model Asset -We use the [STAC 1.1 Bands Object](https://github.com/radiantearth/stac-spec/pull/1254) for representing bands information, including the nodata value, data type, and common band names. Only bands used to train or fine tune the model should be included in this `bands` field. +| Field Name | Type | Description | +| ---------- | ------ | ------------------------------------------------------------------------- | +| title | string | Description of the model asset. | +| href | string | Url to the checkpoint or model artifact. | +| type | string | "application/x-pytorch" or specify another appropriate custom media type. | +| roles | string | Specify one or more of ["model", "weights", "compiled"] | -A deviation from the [STAC 1.1 Bands Object](https://github.com/radiantearth/stac-spec/pull/1254) is that we do not include the [Statistics](stac-statistics) object at the band object level, but at the Model Input level. This is because in machine learning, it is common to only need overall statistics for the dataset used to train the model to normalize all bands.
-[stac-statistics]: https://github.com/radiantearth/stac-spec/pull/1254/files#diff-2477b726f8c5d5d1c8b391be056db325e6918e78a24b414ccd757c7fbd574079R294 +### mlm:source_code Asset -#### Array Object +| Field Name | Type | Description | +| ---------- | ------ | ---------------------------------------------------- | +| title | string | Description of the source code. | +| href | string | Url to the repository. | +| type | string | Use media type "text/html" for code files | +| roles | string | Specify one or more of ["model", "code", "metadata"] | -| Field Name | Type | Description | | -| ---------- | --------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --- | -| shape | [integer] | **REQUIRED.** Shape of the input n-dimensional array ($N \times C \times H \times W$), including the batch size dimension. The batch size dimension must either be greater than 0 or -1 to indicate an unspecified batch dimension size. | | -| dim_order | string | **REQUIRED.** How the above dimensions are ordered within the `shape`. `bhw`, `bchw`, `bthw`, `btchw` are valid orderings where `b`=batch, `c`=channel, `t`=time, `h`=height, w=width. | | -| data_type | enum | **REQUIRED.** The data type of values in the n-dimensional array. For model inputs, this should be the data type of the processed input supplied to the model inference function, not the data type of the source bands. Use one of the [common metadata data types](https://github.com/stac-extensions/raster?tab=readme-ov-file#data-types). | | -Note: It is common in the machine learning, computer vision, and remote sensing communities to refer to rasters that are inputs to a model as arrays or tensors. 
Array Objects are distinct from the JSON array type used to represent lists of values. -#### Container Asset +### Container Asset | Field Name | Type | Description | | ----------- | ------ | ----------------------------------------------------- | @@ -164,6 +159,32 @@ You can also use other base images. Pytorch and Tensorflow offer docker images f - [Torchserve](https://pytorch.org/serve/) - [TFServing](https://github.com/tensorflow/serving) +### Parameters Object + +| Field Name | Type | Description | +|---------------------------------------|------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| *parameter names depend on the model* | number `\|` string `\|` boolean `\|` array | The number of fields and their names depend on the model. Values should not be n-dimensional array inputs. If the model input can be represented as an n-dimensional array, it should instead be supplied as another [model input object](#model-input-object). | + +The `Parameters Object` is a user defined mapping of parameters to parameter values. This is meant to capture model inputs that can't be represented as n-dimensional arrays/tensors. This includes inputs like scalars, text, and booleans. The `parameters` field can either be specified in the [Model Input Object](#model-input-object) if they are associated with a specific input or as an [Item or Collection](#item-properties-and-collection-fields) field if the parameters are supplied without relation to a specific model input. For example: the [Segment Anything](https://ai.meta.com/blog/segment-anything-foundation-model-image-segmentation/) foundational model accepts a label integer for each image input. 
+ +#### Bands and Statistics + +We use the [STAC 1.1 Bands Object](https://github.com/radiantearth/stac-spec/pull/1254) for representing bands information, including the nodata value, data type, and common band names. Only bands used to train or fine tune the model should be included in this `bands` field. + +A deviation from the [STAC 1.1 Bands Object](https://github.com/radiantearth/stac-spec/pull/1254) is that we do not include the [Statistics](stac-statistics) object at the band object level, but at the Model Input level. This is because in machine learning, it is common to only need overall statistics for the dataset used to train the model to normalize all bands. + +[stac-statistics]: https://github.com/radiantearth/stac-spec/pull/1254/files#diff-2477b726f8c5d5d1c8b391be056db325e6918e78a24b414ccd757c7fbd574079R294 + +#### Array Object + +| Field Name | Type | Description | | +| ---------- | --------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --- | +| shape | [integer] | **REQUIRED.** Shape of the input n-dimensional array ($N \times C \times H \times W$), including the batch size dimension. The batch size dimension must either be greater than 0 or -1 to indicate an unspecified batch dimension size. | | +| dim_order | string | **REQUIRED.** How the above dimensions are ordered within the `shape`. `bhw`, `bchw`, `bthw`, `btchw` are valid orderings where `b`=batch, `c`=channel, `t`=time, `h`=height, w=width. | | +| data_type | enum | **REQUIRED.** The data type of values in the n-dimensional array. For model inputs, this should be the data type of the processed input supplied to the model inference function, not the data type of the source bands. 
Use one of the [common metadata data types](https://github.com/stac-extensions/raster?tab=readme-ov-file#data-types). | | + +Note: It is common in the machine learning, computer vision, and remote sensing communities to refer to rasters that are inputs to a model as arrays or tensors. Array Objects are distinct from the JSON array type used to represent lists of values. + ### Model Output Object | Field Name | Type | Description | From fbdb482140b1f1fba5a9c2bbacfa9841a62c1da3 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Wed, 20 Mar 2024 11:24:56 -0700 Subject: [PATCH 064/112] move some non-search info to assets --- README.md | 62 ++++++++++++++++++++++++++++--------------------------- 1 file changed, 32 insertions(+), 30 deletions(-) diff --git a/README.md b/README.md index bd90a6e..678512a 100644 --- a/README.md +++ b/README.md @@ -43,28 +43,27 @@ Check the original technical report for an earlier version of the Model Extensio ## Item Properties and Collection Fields -| Field Name | Type | Description | -| --------------------------- | --------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| mlm:name | string | **REQUIRED.** A unique name for the model. This should include but be distinct from simply naming the model architecture. If there is a publication or other published work related to the model, use the official name of the model. | -| mlm:task | [Task Enum](#task-enum) | **REQUIRED.** Specifies the primary Machine Learning task for which the output can be used for. If there are multi-modal outputs, specify the primary task and specify each task in the [Model Output Object](#model-output-object). 
| -| mlm:framework | string | **REQUIRED.** Framework used to train the model (ex: PyTorch, TensorFlow). | -| mlm:framework_version | string | **REQUIRED.** The `framework` library version. Some models require a specific version of the machine learning `framework` to run. | -| mlm:file_size | integer | **REQUIRED.** The size on disk of the model artifact (bytes). | -| mlm:memory_size | integer | **REQUIRED.** The in-memory size of the model on the accelerator during inference (bytes). | -| mlm:input | [[Model Input Object](#model-input-object)] | **REQUIRED.** Describes the transformation between the EO data and the model input. | -| mlm:output | [[Model Output Object](#model-output-object)] | **REQUIRED.** Describes each model output and how to interpret it. | -| mlm:accelerator | [Accelerator Enum](#accelerator-enum) | **REQUIRED.** The intended computational hardware that runs inference. | -| mlm:accelerator_constrained | boolean | **REQUIRED.** True if the intended `accelerator` is the only `accelerator` that can run inference. False if other accelerators, such as amd64 (CPU), can run inference. | -| mlm:hardware_summary | string | **REQUIRED.** A high level description of the number of accelerators, specific generation of the `accelerator`, or other relevant inference details. | -| mlm:model | [Asset Object](stac-asset) | **REQUIRED.** Asset object containing URI to the model file. Recommended asset `roles` include `weights` for model weights that need to be loaded by a model definition and `compiled` for models that can be loaded directly without an intermediate model definition. | -| mlm:source_code | [Asset Object](stac-asset) | **REQUIRED.** Source code description. Can describe a github repo, zip archive, etc. The `description` field in the Asset Object should reference the inference function, for example my_package.my_module.predict. 
Recommended asset `roles` include `code` and `metadata`, since the source code asset might also refer to more detailed metadata than this spec captures. | -| mlm:container | [Asset Object](stac-asset) | **RECOMMENDED.** Information to run the model in a container with URI to the container. | -| mlm:total_parameters | integer | Total number of model parameters, including trainable and non-trainable parameters. | -| mlm:pretrained_source | string | The source of the pretraining. Can refer to popular pretraining datasets by name (i.e. Imagenet) or less known datasets by URL and description. | -| mlm:summary | string | Text summary of the model and it's purpose. | -| commit_hash | string | Hash value pointing to a specific version of the code used to run model inference. If this is supplied, `source code` should also be supplied and the commit hash must refer to a Git repository linked or described in the `source_code` [Asset Object](stac-asset). | -| batch_size_suggestion | number | A suggested batch size for the accelerator and summarized hardware. | -| mlm:parameters | [Parameters Object](#parameters-object) | Mapping with names for the parameters and their values. Some models may take additional scalars, tuples, and other non-tensor inputs like text during inference (Segment Anything). The field should be specified here if parameters apply to all Model Input Objects. If each Model Input Object has parameters, specify parameters in that object. | +| Field Name | Type | Description | +| --------------------------- | --------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| mlm:name | string | **REQUIRED.** A unique name for the model. 
This should include but be distinct from simply naming the model architecture. If there is a publication or other published work related to the model, use the official name of the model. | +| mlm:task | [Task Enum](#task-enum) | **REQUIRED.** Specifies the primary Machine Learning task for which the output can be used for. If there are multi-modal outputs, specify the primary task and specify each task in the [Model Output Object](#model-output-object). | +| mlm:framework | string | **REQUIRED.** Framework used to train the model (ex: PyTorch, TensorFlow). | +| mlm:framework_version | string | **REQUIRED.** The `framework` library version. Some models require a specific version of the machine learning `framework` to run. | +| mlm:file_size | integer | **REQUIRED.** The size on disk of the model artifact (bytes). | +| mlm:memory_size | integer | **REQUIRED.** The in-memory size of the model on the accelerator during inference (bytes). | +| mlm:input | [[Model Input Object](#model-input-object)] | **REQUIRED.** Describes the transformation between the EO data and the model input. | +| mlm:output | [[Model Output Object](#model-output-object)] | **REQUIRED.** Describes each model output and how to interpret it. | +| mlm:accelerator | [Accelerator Enum](#accelerator-enum) | **REQUIRED.** The intended computational hardware that runs inference. | +| mlm:accelerator_constrained | boolean | **REQUIRED.** True if the intended `accelerator` is the only `accelerator` that can run inference. False if other accelerators, such as amd64 (CPU), can run inference. | +| mlm:hardware_summary | string | **REQUIRED.** A high level description of the number of accelerators, specific generation of the `accelerator`, or other relevant inference details. | +| mlm:model | [Asset Object](stac-asset) | **REQUIRED.** Asset object containing URI to the model file. | +| mlm:source_code | [Asset Object](stac-asset) | **REQUIRED.** Source code description. 
Can describe a github repo, zip archive, etc. | +| mlm:container | [Asset Object](stac-asset) | **RECOMMENDED.** Information to run the model in a container with URI to the container. | +| mlm:total_parameters | integer | Total number of model parameters, including trainable and non-trainable parameters. | +| mlm:pretrained_source | string | The source of the pretraining. Can refer to popular pretraining datasets by name (i.e. Imagenet) or less known datasets by URL and description. | +| mlm:summary | string | Text summary of the model and its purpose. | +| batch_size_suggestion | number | A suggested batch size for the accelerator and summarized hardware. | +| mlm:parameters | [Parameters Object](#parameters-object) | Mapping with names for the parameters and their values. The field should be specified here if parameters apply to all Model Input Objects. If each Model Input Object has parameters, specify parameters in that object. | In addition, fields from the following extensions must be imported in the item: - [Scientific Extension Specification][stac-ext-sci] to describe relevant publications. @@ -113,17 +112,22 @@ It is recommended to define `accelerator` with one of the following values: | type | string | "application/x-pytorch" or specify another appropriate custom media type. | | roles | string | Specify one or more of ["model", "weights", "compiled"] | +Recommended asset `roles` include `weights` for model weights that need to be loaded by a model definition and `compiled` for models that can be loaded directly without an intermediate model definition. ### mlm:source_code Asset -| Field Name | Type | Description | -| ---------- | ------ | ---------------------------------------------------- | -| title | string | Description of the source code. | -| href | string | Url to the repository. 
| -| type | string | Use media type "text/html" for code files | -| roles | string | Specify one or more of ["model", "code", "metadata"] | +| Field Name | Type | Description | +| ----------- | ------ | ------------------------------------------------------------------------------------------------------------------------------------------------------- | +| title | string | Title of the source code. | +| href | string | Url to the repository. | +| type | string | Use media type "text/html" for code files | +| roles | string | Specify one or more of ["model", "code", "metadata"] | +| commit_hash | string | Hash value pointing to a specific version of the code used to run model inference. The commit hash must refer to a Git repository linked in this asset. | +| description | string | Description of the source code. | +The `description` field in the Asset Object should reference the inference function, for example my_package.my_module.predict. Recommended asset `roles` include `code` and `metadata`, since the source code asset might also refer to more detailed metadata than this spec captures. + ### Container Asset | Field Name | Type | Description | @@ -226,8 +230,6 @@ STAC Collections and Items published with the model described by this extension. | dim_names | [string] | **REQUIRED.** The names of the above dimensions of the result array, ordered the same as this object's `shape` field. | | data_type | enum | **REQUIRED.** The data type of values in the n-dimensional array. For model outputs, this should be the data type of the result of the model inference without extra post processing. Use one of the [common metadata data types](https://github.com/radiantearth/stac-spec/blob/f9b3c59ba810541c9da70c5f8d39635f8cba7bcd/item-spec/common-metadata.md#data-types). | - - #### Class Object See the documentation for the [Class Object](https://github.com/stac-extensions/classification?tab=readme-ov-file#class-object). 
We don't use the Bit Field Object since inputs and outputs to machine learning models don't typically use bit fields. From aa3bc9b653787178884262ab3af81e3daf30c4ad Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Wed, 20 Mar 2024 11:34:42 -0700 Subject: [PATCH 065/112] linking and formatting --- README.md | 88 ++++++++++++++++++++++++++++--------------------------- 1 file changed, 45 insertions(+), 43 deletions(-) diff --git a/README.md b/README.md index 678512a..48379af 100644 --- a/README.md +++ b/README.md @@ -43,27 +43,28 @@ Check the original technical report for an earlier version of the Model Extensio ## Item Properties and Collection Fields -| Field Name | Type | Description | -| --------------------------- | --------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| mlm:name | string | **REQUIRED.** A unique name for the model. This should include but be distinct from simply naming the model architecture. If there is a publication or other published work related to the model, use the official name of the model. | -| mlm:task | [Task Enum](#task-enum) | **REQUIRED.** Specifies the primary Machine Learning task for which the output can be used for. If there are multi-modal outputs, specify the primary task and specify each task in the [Model Output Object](#model-output-object). | -| mlm:framework | string | **REQUIRED.** Framework used to train the model (ex: PyTorch, TensorFlow). | -| mlm:framework_version | string | **REQUIRED.** The `framework` library version. Some models require a specific version of the machine learning `framework` to run. 
| -| mlm:file_size | integer | **REQUIRED.** The size on disk of the model artifact (bytes). | -| mlm:memory_size | integer | **REQUIRED.** The in-memory size of the model on the accelerator during inference (bytes). | -| mlm:input | [[Model Input Object](#model-input-object)] | **REQUIRED.** Describes the transformation between the EO data and the model input. | -| mlm:output | [[Model Output Object](#model-output-object)] | **REQUIRED.** Describes each model output and how to interpret it. | -| mlm:accelerator | [Accelerator Enum](#accelerator-enum) | **REQUIRED.** The intended computational hardware that runs inference. | -| mlm:accelerator_constrained | boolean | **REQUIRED.** True if the intended `accelerator` is the only `accelerator` that can run inference. False if other accelerators, such as amd64 (CPU), can run inference. | -| mlm:hardware_summary | string | **REQUIRED.** A high level description of the number of accelerators, specific generation of the `accelerator`, or other relevant inference details. | -| mlm:model | [Asset Object](stac-asset) | **REQUIRED.** Asset object containing URI to the model file. | -| mlm:source_code | [Asset Object](stac-asset) | **REQUIRED.** Source code description. Can describe a github repo, zip archive, etc. | -| mlm:container | [Asset Object](stac-asset) | **RECOMMENDED.** Information to run the model in a container with URI to the container. | -| mlm:total_parameters | integer | Total number of model parameters, including trainable and non-trainable parameters. | -| mlm:pretrained_source | string | The source of the pretraining. Can refer to popular pretraining datasets by name (i.e. Imagenet) or less known datasets by URL and description. | -| mlm:summary | string | Text summary of the model and it's purpose. | -| batch_size_suggestion | number | A suggested batch size for the accelerator and summarized hardware. 
| -| mlm:parameters | [Parameters Object](#parameters-object) | Mapping with names for the parameters and their values. The field should be specified here if parameters apply to all Model Input Objects. If each Model Input Object has parameters, specify parameters in that object. | +| Field Name | Type | Description | +|-----------------------------|-----------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| mlm:name | string | **REQUIRED.** A unique name for the model. This should include but be distinct from simply naming the model architecture. If there is a publication or other published work related to the model, use the official name of the model. | +| mlm:task | [Task Enum](#task-enum) | **REQUIRED.** Specifies the primary Machine Learning task for which the output can be used for. If there are multi-modal outputs, specify the primary task and specify each task in the [Model Output Object](#model-output-object). | +| mlm:framework | string | **REQUIRED.** Framework used to train the model (ex: PyTorch, TensorFlow). | +| mlm:framework_version | string | **REQUIRED.** The `framework` library version. Some models require a specific version of the machine learning `framework` to run. | +| mlm:file_size | integer | **REQUIRED.** The size on disk of the model artifact (bytes). | +| mlm:memory_size | integer | **REQUIRED.** The in-memory size of the model on the accelerator during inference (bytes). | +| mlm:input | [[Model Input Object](#model-input-object)] | **REQUIRED.** Describes the transformation between the EO data and the model input. | +| mlm:output | [[Model Output Object](#model-output-object)] | **REQUIRED.** Describes each model output and how to interpret it. 
| +| mlm:accelerator | [Accelerator Enum](#accelerator-enum) | **REQUIRED.** The intended computational hardware that runs inference. | +| mlm:accelerator_constrained | boolean | **REQUIRED.** True if the intended `accelerator` is the only `accelerator` that can run inference. False if other accelerators, such as amd64 (CPU), can run inference. | +| mlm:hardware_summary | string | **REQUIRED.** A high level description of the number of accelerators, specific generation of the `accelerator`, or other relevant inference details. | +| mlm:model | [Asset Object](stac-asset) | **REQUIRED.** Asset object containing URI to the model file. | +| mlm:source_code | [Asset Object](stac-asset) | **REQUIRED.** Source code description. Can describe a github repo, zip archive, etc. | +| mlm:container | [Asset Object](stac-asset) | **RECOMMENDED.** Information to run the model in a container with URI to the container. | +| mlm:total_parameters | integer | Total number of model parameters, including trainable and non-trainable parameters. | +| mlm:pretrained_source | string | The source of the pretraining. Can refer to popular pretraining datasets by name (i.e. Imagenet) or less known datasets by URL and description. | +| mlm:summary | string | Text summary of the model and it's purpose. | +| batch_size_suggestion | number | A suggested batch size for the accelerator and summarized hardware. | +| mlm:parameters | [Parameters Object](#parameters-object) | Mapping with names for the parameters and their values. The field should be specified here if parameters apply to all [Model Input Objects](#model-input-object). If each [Model Input Object](#model-input-object) has parameters, specify parameters in that object. | + In addition, fields from the following extensions must be imported in the item: - [Scientific Extension Specification][stac-ext-sci] to describe relevant publications. 
@@ -105,39 +106,40 @@ It is recommended to define `accelerator` with one of the following values: ### mlm:model Asset -| Field Name | Type | Description | -| ---------- | ------ | ------------------------------------------------------------------------- | -| title | string | Description of the model asset. | -| href | string | Url to the checkoint or model artifact. | -| type | string | "application/x-pytorch" or specify another appropriate custom media type. | -| roles | string | Specify one or more of ["model", "weights", "compiled"] | +| Field Name | Type | Description | +|------------|----------|---------------------------------------------------------------------------| +| title | string | Description of the model asset. | +| href | string | Url to the checkpoint or model artifact. | +| type | string | "application/x-pytorch" or specify another appropriate custom media type. | +| roles | [string] | Specify one or more of ["model", "weights", "compiled"] | + Recommended asset `roles` include `weights` for model weights that need to be loaded by a model definition and `compiled` for models that can be loaded directly without an intermediate model definition. ### mlm:source_code Asset -| Field Name | Type | Description | -| ----------- | ------ | ------------------------------------------------------------------------------------------------------------------------------------------------------- | -| title | string | Title of the source code. | -| href | string | Url to the repository. | -| type | string | Use media type "text/html" for code files | -| roles | string | Specify one or more of ["model", "code", "metadata"] | -| commit_hash | string | Hash value pointing to a specific version of the code used to run model inference. The commit hash must refer to a Git repository linked in this asset. | -| description | string | Description of the source code. 
| +| Field Name | Type | Description | +|-------------|----------|---------------------------------------------------------------------------------------------------------------------------------------------------------| +| title | string | Title of the source code. | +| href | string | Url to the repository. | +| type | string | Use media type `"text/html"` for code files | +| roles | [string] | Specify one or more of ["model", "code", "metadata"] | +| commit_hash | string | Hash value pointing to a specific version of the code used to run model inference. The commit hash must refer to a Git repository linked in this asset. | +| description | string | Description of the source code. | The `description` field in the Asset Object should reference the inference function, for example my_package.my_module.predict. Recommended asset `roles` include `code` and `metadata`, since the source code asset might also refer to more detailed metadata than this spec captures. ### Container Asset -| Field Name | Type | Description | -| ----------- | ------ | ----------------------------------------------------- | -| title | string | Description of the container. | -| href | string | Url of the container file (Dockerfile). | -| type | string | "application/vnd.oci.image.index.v1+json" | -| roles | string | ["runtime"] | -| working_dir | string | Working directory in the instance that can be mapped. | -| run | string | Running command. | +| Field Name | Type | Description | +|-------------|----------|-------------------------------------------------------| +| title | string | Description of the container. | +| href | string | Url of the container file (Dockerfile). | +| type | string | "application/vnd.oci.image.index.v1+json" | +| roles | [string] | Specify ["runtime"] and any other custom roles. | +| working_dir | string | Working directory in the instance that can be mapped. | +| run | string | Running command. 
| If you're unsure how to containerize your model, we suggest starting from the latest official container image for your framework that works with your model and pinning the container version. From 2a2039b8faa8de3de1f5c13068a1f176c01a9cc8 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Tue, 26 Mar 2024 10:18:50 -0700 Subject: [PATCH 066/112] remove parameters, add artifact type field --- README.md | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index 48379af..c14c2a0 100644 --- a/README.md +++ b/README.md @@ -56,15 +56,10 @@ Check the original technical report for an earlier version of the Model Extensio | mlm:accelerator | [Accelerator Enum](#accelerator-enum) | **REQUIRED.** The intended computational hardware that runs inference. | | mlm:accelerator_constrained | boolean | **REQUIRED.** True if the intended `accelerator` is the only `accelerator` that can run inference. False if other accelerators, such as amd64 (CPU), can run inference. | | mlm:hardware_summary | string | **REQUIRED.** A high level description of the number of accelerators, specific generation of the `accelerator`, or other relevant inference details. | -| mlm:model | [Asset Object](stac-asset) | **REQUIRED.** Asset object containing URI to the model file. | -| mlm:source_code | [Asset Object](stac-asset) | **REQUIRED.** Source code description. Can describe a github repo, zip archive, etc. | -| mlm:container | [Asset Object](stac-asset) | **RECOMMENDED.** Information to run the model in a container with URI to the container. | | mlm:total_parameters | integer | Total number of model parameters, including trainable and non-trainable parameters. | | mlm:pretrained_source | string | The source of the pretraining. Can refer to popular pretraining datasets by name (i.e. Imagenet) or less known datasets by URL and description. | | mlm:summary | string | Text summary of the model and it's purpose. 
| | batch_size_suggestion | number | A suggested batch size for the accelerator and summarized hardware. | -| mlm:parameters | [Parameters Object](#parameters-object) | Mapping with names for the parameters and their values. The field should be specified here if parameters apply to all [Model Input Objects](#model-input-object). If each [Model Input Object](#model-input-object) has parameters, specify parameters in that object. | - In addition, fields from the following extensions must be imported in the item: - [Scientific Extension Specification][stac-ext-sci] to describe relevant publications. @@ -82,7 +77,6 @@ In addition, fields from the following extensions must be imported in the item: | name | string | **REQUIRED.** Informative name of the input variable. Example "RGB Time Series" | | | bands | [string] | **REQUIRED.** The names of the raster bands used to train or fine-tune the model, which may be all or a subset of bands available in a STAC Item's [Band Object](#bands-and-statistics). | | | input_array | [Array Object](#feature-array-object) | **REQUIRED.** The N-dimensional array object that describes the shape, dimension ordering, and data type. | | -| parameters | [Parameters Object](#parameters-object) | Mapping with names for the parameters and their values. Some models may take additional scalars, tuples, and other non-tensor inputs like text. | | | norm_by_channel | boolean | Whether to normalize each channel by channel-wise statistics or to normalize by dataset statistics. If True, use an array of [Statistics Objects](#bands-and-statistics) that is ordered like the `bands` field in this object. | | | norm_type | string | Normalization method. Select one option from `min_max`, `z_score`, `max_norm`, `mean_norm`, `unit_variance`, `norm_with_clip`, `none` | | | resize_type | string | High-level descriptor of the rescaling method to change image shape. Select one option from `crop`, `pad`, `interpolation`, `none`. 
If your rescaling method combines more than one of these operations, provide the name of the operation instead | | @@ -104,14 +98,23 @@ It is recommended to define `accelerator` with one of the following values: [stac-asset]: https://github.com/radiantearth/stac-spec/blob/master/collection-spec/collection-spec.md#asset-object + +### MLM Asset Fields + +| mlm:model | [Asset Object](stac-asset) | **REQUIRED.** Asset object containing URI to the model file. | +| mlm:source_code | [Asset Object](stac-asset) | **REQUIRED.** Source code description. Can describe a github repo, zip archive, etc. | +| mlm:container | [Asset Object](stac-asset) | **RECOMMENDED.** Information to run the model in a container with URI to the container. | + + ### mlm:model Asset | Field Name | Type | Description | |------------|----------|---------------------------------------------------------------------------| | title | string | Description of the model asset. | -| href | string | Url to the checkpoint or model artifact. | +| href | string | Url to the model artifact. | | type | string | "application/x-pytorch" or specify another appropriate custom media type. | | roles | [string] | Specify one or more of ["model", "weights", "compiled"] | +| mlm:artifact_type | ArtifactTypeEnum | Specifies the kind of model artifact. Typically related to a particular ml framework. | Recommended asset `roles` include `weights` for model weights that need to be loaded by a model definition and `compiled` for models that can be loaded directly without an intermediate model definition. @@ -165,13 +168,6 @@ You can also use other base images. 
Pytorch and Tensorflow offer docker images f - [Torchserve](https://pytorch.org/serve/) - [TFServing](https://github.com/tensorflow/serving) -### Parameters Object - -| Field Name | Type | Description | -|---------------------------------------|------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| *parameter names depend on the model* | number `\|` string `\|` boolean `\|` array | The number of fields and their names depend on the model. Values should not be n-dimensional array inputs. If the model input can be represented as an n-dimensional array, it should instead be supplied as another [model input object](#model-input-object). | - -The `Parameters Object` is a user defined mapping of parameters to parameter values. This is meant to capture model inputs that can't be represented as n-dimensional arrays/tensors. This includes inputs like scalars, text, and booleans. The `parameters` field can either be specified in the [Model Input Object](#model-input-object) if they are associated with a specific input or as an [Item or Collection](#item-properties-and-collection-fields) field if the parameters are supplied without relation to a specific model input. For example: the [Segment Anything](https://ai.meta.com/blog/segment-anything-foundation-model-image-segmentation/) foundational model accepts a label integer for each image input. 
#### Bands and Statistics From 67b46882d8d757ea025bafe127cd3fdd884a4cbc Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Thu, 28 Mar 2024 15:54:27 -0400 Subject: [PATCH 067/112] [wip] address PR comments about tasks definitions --- CHANGELOG.md | 4 +++ README.md | 85 +++++++++++++++++++++++++++++++--------------------- 2 files changed, 55 insertions(+), 34 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 759fda6..a205c8b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,12 +15,16 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - [`hardware_summary`, `accelerator`, `accelerator_constrained`](./README#runtime-object) to specify hardware requirements for inference - Use common metadata [Asset Object](https://github.com/radiantearth/stac-spec/blob/master/collection-spec/collection-spec.md#asset-object) to refer to model asset and source code. - flexible [class map object](./README.md#class-map-object) and [parameters object](./README.md#parameters-object) to handle aspects of models that vary substantially in number +- add `scene-classification` to the Enum Tasks to allow disambiguation between pixel-wise and patch-based classification ### Changed - reorganized `dlm:architecture` nested fields to exist at the top level of properties as `mlm:name`, `mlm:summary` and so on to provide STAC API search capabilities. - replaced `normalization:mean`, etc. 
with [statistics](./README.md#bands-and-statistics) from STAC 1.1 common metadata - added `pydantic` models for internal schema objects in `stac_model` package and published to PYPI - specified [rel_type](./README.md#relation-types) to be `derived_from` and specify how model item or collection json should be named +- replaced all Enum Tasks names to use hyphens instead of spaces +- replaced `dlm:task` by `mlm:tasks` using an array of value instead of a single one, allowing models to represent + multiple tasks they support simultaneously or interchangeably depending on context ### Deprecated - diff --git a/README.md b/README.md index c14c2a0..a9f6668 100644 --- a/README.md +++ b/README.md @@ -43,23 +43,23 @@ Check the original technical report for an earlier version of the Model Extensio ## Item Properties and Collection Fields -| Field Name | Type | Description | -|-----------------------------|-----------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| mlm:name | string | **REQUIRED.** A unique name for the model. This should include but be distinct from simply naming the model architecture. If there is a publication or other published work related to the model, use the official name of the model. | -| mlm:task | [Task Enum](#task-enum) | **REQUIRED.** Specifies the primary Machine Learning task for which the output can be used for. If there are multi-modal outputs, specify the primary task and specify each task in the [Model Output Object](#model-output-object). | -| mlm:framework | string | **REQUIRED.** Framework used to train the model (ex: PyTorch, TensorFlow). | -| mlm:framework_version | string | **REQUIRED.** The `framework` library version. 
Some models require a specific version of the machine learning `framework` to run. | -| mlm:file_size | integer | **REQUIRED.** The size on disk of the model artifact (bytes). | -| mlm:memory_size | integer | **REQUIRED.** The in-memory size of the model on the accelerator during inference (bytes). | -| mlm:input | [[Model Input Object](#model-input-object)] | **REQUIRED.** Describes the transformation between the EO data and the model input. | -| mlm:output | [[Model Output Object](#model-output-object)] | **REQUIRED.** Describes each model output and how to interpret it. | -| mlm:accelerator | [Accelerator Enum](#accelerator-enum) | **REQUIRED.** The intended computational hardware that runs inference. | -| mlm:accelerator_constrained | boolean | **REQUIRED.** True if the intended `accelerator` is the only `accelerator` that can run inference. False if other accelerators, such as amd64 (CPU), can run inference. | -| mlm:hardware_summary | string | **REQUIRED.** A high level description of the number of accelerators, specific generation of the `accelerator`, or other relevant inference details. | -| mlm:total_parameters | integer | Total number of model parameters, including trainable and non-trainable parameters. | -| mlm:pretrained_source | string | The source of the pretraining. Can refer to popular pretraining datasets by name (i.e. Imagenet) or less known datasets by URL and description. | -| mlm:summary | string | Text summary of the model and it's purpose. | -| batch_size_suggestion | number | A suggested batch size for the accelerator and summarized hardware. 
| +| Field Name | Type | Description | +|-----------------------------|-----------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| mlm:name | string | **REQUIRED.** A unique name for the model. This should include but be distinct from simply naming the model architecture. If there is a publication or other published work related to the model, use the official name of the model. | +| mlm:tasks | [[Task Enum](#task-enum)] | **REQUIRED.** Specifies the Machine Learning tasks for which the model can be used for. If multi-tasks outputs are provided by distinct model heads, specify all available tasks under the main properties and specify respective tasks in each [Model Output Object](#model-output-object). | +| mlm:framework | string | **REQUIRED.** Framework used to train the model (ex: PyTorch, TensorFlow). | +| mlm:framework_version | string | **REQUIRED.** The `framework` library version. Some models require a specific version of the machine learning `framework` to run. | +| mlm:file_size | integer | **REQUIRED.** The size on disk of the model artifact (bytes). | +| mlm:memory_size | integer | **REQUIRED.** The in-memory size of the model on the accelerator during inference (bytes). | +| mlm:input | [[Model Input Object](#model-input-object)] | **REQUIRED.** Describes the transformation between the EO data and the model input. | +| mlm:output | [[Model Output Object](#model-output-object)] | **REQUIRED.** Describes each model output and how to interpret it. | +| mlm:accelerator | [Accelerator Enum](#accelerator-enum) | **REQUIRED.** The intended computational hardware that runs inference. 
| +| mlm:accelerator_constrained | boolean | **REQUIRED.** True if the intended `accelerator` is the only `accelerator` that can run inference. False if other accelerators, such as amd64 (CPU), can run inference. | +| mlm:hardware_summary | string | **REQUIRED.** A high level description of the number of accelerators, specific generation of the `accelerator`, or other relevant inference details. | +| mlm:total_parameters | integer | Total number of model parameters, including trainable and non-trainable parameters. | +| mlm:pretrained_source | string | The source of the pretraining. Can refer to popular pretraining datasets by name (i.e. Imagenet) or less known datasets by URL and description. | +| mlm:summary | string | Text summary of the model and it's purpose. | +| batch_size_suggestion | number | A suggested batch size for the accelerator and summarized hardware. | In addition, fields from the following extensions must be imported in the item: - [Scientific Extension Specification][stac-ext-sci] to describe relevant publications. @@ -190,8 +190,8 @@ Note: It is common in the machine learning, computer vision, and remote sensing ### Model Output Object | Field Name | Type | Description | -| ------------------------ | --------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| task | [Task Enum](#task-enum) | **REQUIRED.** Specifies the Machine Learning task for which the output can be used for. 
| +|--------------------------| --------------------------------------------- |--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| tasks | [[Task Enum](#task-enum)] | **REQUIRED.** Specifies the Machine Learning tasks for which the output can be used for. This can be a subset of `mlm:tasks` defined under the Item `properties` as applicable. | | result_array | [[Result Array Object](#result-array-object)] | The list of output arrays/tensors from the model. | | classification:classes | [[Class Object](#class-object)] | A list of class objects adhering to the [Classification extension](https://github.com/stac-extensions/classification). | | post_processing_function | string | A url to the postprocessing function where normalization, rescaling, and other operations take place.. Or, instead, the function code path, for example: `my_package.my_module.my_processing_function` | @@ -202,22 +202,38 @@ While only `task` is a required field, all fields are recommended for supervised #### Task Enum -It is recommended to define `task` with one of the following values for each Model Output Object: -- `regression` -- `classification` -- `object detection` -- `semantic segmentation` -- `instance segmentation` -- `panoptic segmentation` -- `multi-modal` -- `similarity search` -- `image captioning` -- `generative` -- `super resolution` - -If the task falls within the category of supervised machine learning and uses labels during training, this should align with the `label:tasks` values defined in [STAC Label Extension][stac-ext-label-props] for relevant +It is recommended to define `mlm:tasks` of the entire model and `tasks` of [Model Output Object](#model-output-object) +with the following values. Although other values are permitted, they should be used sparingly to allow better +interoperability of models and their representation. 
+ +| Task Name | Corresponding `label:tasks` | Description | +|-------------------------|------------------------------|----------------------------------------------------------------------------------------| +| `regression` | `regression` | Generic regression that estimates a numeric value. | +| `classification` | `classification` | Generic classification task that assigns class labels to an output. | +| `scene-classification` | *n/a* | +| `detection` | `detection` | Generic detection of the "presence" of objects or entities, with or without positions. | +| `object-detection` | *n/a* | +| `segmentation` | *n/a* | | +| `semantic-segmentation` | *n/a* | +| `instance-segmentation` | *n/a* | +| `panoptic-segmentation` | *n/a* | +| `similarity-search` | *n/a* | +| `image-captioning` | *n/a* | +| `generative` | *n/a* | +| `super-resolution` | *n/a* | + +If the task falls within the category of supervised machine learning and uses labels during training, +this should align with the `label:tasks` values defined in [STAC Label Extension][stac-ext-label-props] for relevant STAC Collections and Items published with the model described by this extension. +It is to be noted that multiple "generic" tasks names (`classification`, `detection`, etc.) are defined to allow +correspondance with `label:tasks`, but these can lead to some ambiguity depending on context. For example, a model +that supports `classification` could mean that the model can predict patch-based classes over an entire scene +(i.e.: `scene-classification` for a single prediction over an entire area of interest as a whole), +or that it can predict pixel-wise classification (i.e.: `pixel-classification`), such as land-cover labels for +every single pixel coordinate over the area of interest. To avoid this kind of ambiguity, `tasks` should always aim +to provide the most specific definitions possible to explicitly describe the model. 
+ [stac-ext-label-props]: https://github.com/stac-extensions/label#item-properties #### Result Array Object @@ -230,7 +246,8 @@ STAC Collections and Items published with the model described by this extension. #### Class Object -See the documentation for the [Class Object](https://github.com/stac-extensions/classification?tab=readme-ov-file#class-object). We don't use the Bit Field Object since inputs and outputs to machine learning models don't typically use bit fields. +See the documentation for the +[Class Object](https://github.com/stac-extensions/classification?tab=readme-ov-file#class-object). ## Relation types From efe223b9dc67313345e7db0971810ce85101cf00 Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Fri, 29 Mar 2024 17:36:31 -0400 Subject: [PATCH 068/112] apply PR recommendations --- README.md | 391 ++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 263 insertions(+), 128 deletions(-) diff --git a/README.md b/README.md index a9f6668..6873633 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,8 @@ - [@ymoisan](https://github.com/ymoisan) - [@sfoucher](https://github.com/sfoucher) -The STAC Machine Learning Model (MLM) Extension provides a standard set of fields to describe machine learning models trained on overhead imagery and enable running model inference. +The STAC Machine Learning Model (MLM) Extension provides a standard set of fields to describe machine learning models +trained on overhead imagery and enable running model inference. The main objectives of the extension are: @@ -22,16 +23,25 @@ The main objectives of the extension are: Specifically, this extension records the following information to make ML models searchable and reusable: 1. Sensor band specifications -1. Model input transforms including resize and normalization -1. Model output shape, data type, and its semantic interpretation -1. An optional, flexible description of the runtime environment to be able to run the model -1. 
Scientific references - -The MLM specification is biased towards providing metadata fields for supervised machine learning models. However, fields that relate to supervised ML are optional and users can use the fields they need for different tasks. - -See [Best Practices](./best-practices.md) for guidance on what other STAC extensions you should use in conjunction with this extension. The Machine Learning Model Extension purposely omits and delegates some definitions to other STAC extensions to favor reusability and avoid metadata duplication whenever possible. A properly defined MLM STAC Item/Collection should almost never have the Machine Learning Model Extension exclusively in `stac_extensions`. - -Check the original technical report for an earlier version of the Model Extension, formerly known as the Deep Learning Model Extension (DLM), [here](https://github.com/crim-ca/CCCOT03/raw/main/CCCOT03_Rapport%20Final_FINAL_EN.pdf) for more details. The DLM was renamed to the current MLM Extension and refactored to form a cohesive definition across all machine learning approaches, regardless of whether the approach constitutes a deep neural network or other statistical approach. +2. Model input transforms including resize and normalization +3. Model output shape, data type, and its semantic interpretation +4. An optional, flexible description of the runtime environment to be able to run the model +5. Scientific references + +The MLM specification is biased towards providing metadata fields for supervised machine learning models. +However, fields that relate to supervised ML are optional and users can use the fields they need for different tasks. + +See [Best Practices](./best-practices.md) for guidance on what other STAC extensions you should use in conjunction with this extension. +The Machine Learning Model Extension purposely omits and delegates some definitions to other STAC extensions to favor +reusability and avoid metadata duplication whenever possible. 
A properly defined MLM STAC Item/Collection should almost +never have the Machine Learning Model Extension exclusively in `stac_extensions`. + +Check the original [Technical Report](https://github.com/crim-ca/CCCOT03/raw/main/CCCOT03_Rapport%20Final_FINAL_EN.pdf) +for an earlier version of the MLM Extension, formerly known as the Deep Learning Model Extension (DLM). +DLM was renamed to the current MLM Extension and refactored to form a cohesive definition across all machine +learning approaches, regardless of whether the approach constitutes a deep neural network or other statistical approach. +It also combines multiple definitions from the predecessor [ML-Model](https://github.com/stac-extensions/ml-model) +extension to synthesize common use cases into a single reference for Machine Learning Models. ![Image Description](https://i.imgur.com/cVAg5sA.png) @@ -43,23 +53,24 @@ Check the original technical report for an earlier version of the Model Extensio ## Item Properties and Collection Fields -| Field Name | Type | Description | -|-----------------------------|-----------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| mlm:name | string | **REQUIRED.** A unique name for the model. This should include but be distinct from simply naming the model architecture. If there is a publication or other published work related to the model, use the official name of the model. | -| mlm:tasks | [[Task Enum](#task-enum)] | **REQUIRED.** Specifies the Machine Learning tasks for which the model can be used for. 
If multi-tasks outputs are provided by distinct model heads, specify all available tasks under the main properties and specify respective tasks in each [Model Output Object](#model-output-object). | -| mlm:framework | string | **REQUIRED.** Framework used to train the model (ex: PyTorch, TensorFlow). | -| mlm:framework_version | string | **REQUIRED.** The `framework` library version. Some models require a specific version of the machine learning `framework` to run. | -| mlm:file_size | integer | **REQUIRED.** The size on disk of the model artifact (bytes). | -| mlm:memory_size | integer | **REQUIRED.** The in-memory size of the model on the accelerator during inference (bytes). | -| mlm:input | [[Model Input Object](#model-input-object)] | **REQUIRED.** Describes the transformation between the EO data and the model input. | -| mlm:output | [[Model Output Object](#model-output-object)] | **REQUIRED.** Describes each model output and how to interpret it. | -| mlm:accelerator | [Accelerator Enum](#accelerator-enum) | **REQUIRED.** The intended computational hardware that runs inference. | -| mlm:accelerator_constrained | boolean | **REQUIRED.** True if the intended `accelerator` is the only `accelerator` that can run inference. False if other accelerators, such as amd64 (CPU), can run inference. | -| mlm:hardware_summary | string | **REQUIRED.** A high level description of the number of accelerators, specific generation of the `accelerator`, or other relevant inference details. | -| mlm:total_parameters | integer | Total number of model parameters, including trainable and non-trainable parameters. | -| mlm:pretrained_source | string | The source of the pretraining. Can refer to popular pretraining datasets by name (i.e. Imagenet) or less known datasets by URL and description. | -| mlm:summary | string | Text summary of the model and it's purpose. | -| batch_size_suggestion | number | A suggested batch size for the accelerator and summarized hardware. 
| +| Field Name | Type | Description | +|-----------------------------|--------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| mlm:name | string | **REQUIRED** A unique name for the model. This can include, but must be distinct, from simply naming the model architecture. If there is a publication or other published work related to the model, use the official name of the model. | +| mlm:architecture | [Model Architecture](#model-architecture) string | **REQUIRED** A generic and well established architecture name of the model. | +| mlm:tasks | [[Task Enum](#task-enum)] | **REQUIRED** Specifies the Machine Learning tasks for which the model can be used for. If multi-tasks outputs are provided by distinct model heads, specify all available tasks under the main properties and specify respective tasks in each [Model Output Object](#model-output-object). | +| mlm:framework | string | **REQUIRED** Framework used to train the model (ex: PyTorch, TensorFlow). | +| mlm:framework_version | string | **REQUIRED** The `framework` library version. Some models require a specific version of the machine learning `framework` to run. | +| mlm:file_size | integer | **REQUIRED** The size on disk of the model artifact (bytes). | +| mlm:memory_size | integer | **REQUIRED** The in-memory size of the model on the accelerator during inference (bytes). | +| mlm:input | [[Model Input Object](#model-input-object)] | **REQUIRED** Describes the transformation between the EO data and the model input. | +| mlm:output | [[Model Output Object](#model-output-object)] | **REQUIRED** Describes each model output and how to interpret it. 
| +| mlm:accelerator | [Accelerator Enum](#accelerator-enum) | **REQUIRED** The intended computational hardware that runs inference. | +| mlm:accelerator_constrained | boolean | **REQUIRED** True if the intended `accelerator` is the only `accelerator` that can run inference. False if other accelerators, such as amd64 (CPU), can run inference. | +| mlm:hardware_summary | string | **REQUIRED** A high level description of the number of accelerators, specific generation of the `accelerator`, or other relevant inference details. | +| mlm:total_parameters | integer | Total number of model parameters, including trainable and non-trainable parameters. | +| mlm:pretrained_source | string | The source of the pretraining. Can refer to popular pretraining datasets by name (i.e. Imagenet) or less known datasets by URL and description. | +| mlm:summary | string | Text summary of the model and it's purpose. | +| batch_size_suggestion | number | A suggested batch size for the accelerator and summarized hardware. | In addition, fields from the following extensions must be imported in the item: - [Scientific Extension Specification][stac-ext-sci] to describe relevant publications. @@ -68,21 +79,32 @@ In addition, fields from the following extensions must be imported in the item: [stac-ext-sci]: https://github.com/radiantearth/stac-spec/tree/v1.0.0-beta.2/extensions/scientific/README.md [stac-ext-ver]: https://github.com/radiantearth/stac-spec/tree/v1.0.0-beta.2/extensions/version/README.md +### Model Architecture -### Model Input Object +In most cases, this should correspond to common architecture names defined in the literature, +such as `ResNet`, `VGG`, `GAN` or `Vision Transformer`. For more examples of proper names (including casing), +the [Papers With Code - Computer Vision Methods](https://paperswithcode.com/methods/area/computer-vision) can be used. +Note that this field is not an explicit "Enum", and is used only as an indicator of common architecture occurrences. 
+If no specific or predefined architecture can be associated with the described model, simply employ `unknown` or +another custom name as deemed appropriate. +### Model Input Object -| Field Name | Type | Description | | -| ----------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --- | -| name | string | **REQUIRED.** Informative name of the input variable. Example "RGB Time Series" | | -| bands | [string] | **REQUIRED.** The names of the raster bands used to train or fine-tune the model, which may be all or a subset of bands available in a STAC Item's [Band Object](#bands-and-statistics). | | -| input_array | [Array Object](#feature-array-object) | **REQUIRED.** The N-dimensional array object that describes the shape, dimension ordering, and data type. | | -| norm_by_channel | boolean | Whether to normalize each channel by channel-wise statistics or to normalize by dataset statistics. If True, use an array of [Statistics Objects](#bands-and-statistics) that is ordered like the `bands` field in this object. | | -| norm_type | string | Normalization method. Select one option from `min_max`, `z_score`, `max_norm`, `mean_norm`, `unit_variance`, `norm_with_clip`, `none` | | -| resize_type | string | High-level descriptor of the rescaling method to change image shape. Select one option from `crop`, `pad`, `interpolation`, `none`. If your rescaling method combines more than one of these operations, provide the name of the operation instead | | -| statistics | [Statistics Object](stac-statistics) `\|` [[Statistics Object](stac-statistics)] | Dataset statistics for the training dataset used to normalize the inputs. 
| | -| norm_with_clip_values | [integer] | If `norm_type = "norm_with_clip"` this array supplies a value that is less than the band maximum. The array must be the same length as "bands", each value is used to divide each band before clipping values between 0 and 1. | -| pre_processing_function | string | A url to the preprocessing function where normalization and rescaling takes place, and any other significant operations. Or, instead, the function code path, for example: `my_python_module_name:my_processing_function` | | +| Field Name | Type | Description | +|-------------------------|---------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| name | string | **REQUIRED** Name of the input variable defined by the model. If no explicit name is defined by the model, an informative name (e.g.: "RGB Time Series") can be used instead. | +| bands | [string] | **REQUIRED** The names of the raster bands used to train or fine-tune the model, which may be all or a subset of bands available in a STAC Item's [Band Object](#bands-and-statistics). | +| input | [Input Structure Object](#input-structure-object) | **REQUIRED** The N-dimensional array definition that describes the shape, dimension ordering, and data type. | +| norm_by_channel | boolean | Whether to normalize each channel by channel-wise statistics or to normalize by dataset statistics. If True, use an array of `statistics` of same dimensionality and order as the `bands` field in this object. | +| norm_type | string \| null | Normalization method. Select one option from `"min_max"`, `"z_score"`, `"max_norm"`, `"mean_norm"`, `"unit_variance"`, `"norm_with_clip"` or `null` when none applies. 
| +| resize_type | string \| null | High-level descriptor of the rescaling method to change image shape. Select one option from `"crop"`, `"pad"`, `"interpolation"` or `null` when none applies. If your rescaling method combines more than one of these operations, provide the name of the operation instead. | +| statistics | [[Statistics Object](#bands-and-statistics)] | Dataset statistics for the training dataset used to normalize the inputs. | +| norm_with_clip_values | [integer] | If `norm_type = "norm_with_clip"` this array supplies a value that is less than the band maximum. The array must be the same length as `bands`, each value is used to divide each band before clipping values between 0 and 1. | +| pre_processing_function | string \| null | URI to the preprocessing function where normalization and rescaling takes place, and any other significant operations or, instead, the function code path, for example: `my_python_module_name:my_processing_function`. | + +Fields that accept the `null` value can be considered `null` when omitted entirely for parsing purposes. +However, setting `null` explicitly when this information is known by the model provider can help users understand +what is the expected behavior of the model. It is therefore recommended to provide `null` explicitly when applicable. ### Accelerator Enum @@ -99,52 +121,132 @@ It is recommended to define `accelerator` with one of the following values: [stac-asset]: https://github.com/radiantearth/stac-spec/blob/master/collection-spec/collection-spec.md#asset-object -### MLM Asset Fields - -| mlm:model | [Asset Object](stac-asset) | **REQUIRED.** Asset object containing URI to the model file. | -| mlm:source_code | [Asset Object](stac-asset) | **REQUIRED.** Source code description. Can describe a github repo, zip archive, etc. | -| mlm:container | [Asset Object](stac-asset) | **RECOMMENDED.** Information to run the model in a container with URI to the container. 
| - - -### mlm:model Asset - -| Field Name | Type | Description | -|------------|----------|---------------------------------------------------------------------------| -| title | string | Description of the model asset. | -| href | string | Url to the model artifact. | -| type | string | "application/x-pytorch" or specify another appropriate custom media type. | -| roles | [string] | Specify one or more of ["model", "weights", "compiled"] | -| mlm:artifact_type | ArtifactTypeEnum | Specifies the kind of model artifact. Typically related to a particular ml framework. | - - -Recommended asset `roles` include `weights` for model weights that need to be loaded by a model definition and `compiled` for models that can be loaded directly without an intermediate model definition. - -### mlm:source_code Asset - -| Field Name | Type | Description | -|-------------|----------|---------------------------------------------------------------------------------------------------------------------------------------------------------| -| title | string | Title of the source code. | -| href | string | Url to the repository. | -| type | string | Use media type `"text/html"` for code files | -| roles | [string] | Specify one or more of ["model", "code", "metadata"] | -| commit_hash | string | Hash value pointing to a specific version of the code used to run model inference. The commit hash must refer to a Git repository linked in this asset. | -| description | string | Description of the source code. | - - -The `description` field in the Asset Object should reference the inference function, for example my_package.my_module.predict. Recommended asset `roles` include `code` and `metadata`, since the source code asset might also refer to more detailed metadata than this spec captures. 
+## Assets Objects + +| Field Name | Type | Description | +|-----------------|----------------------------|-------------------------------------------------------------------------------------------| +| mlm:model | [Asset Object][stac-asset] | **REQUIRED** Asset object containing the model definition. | +| mlm:source_code | [Asset Object][stac-asset] | **RECOMMENDED** Source code description. Can describe a Git repository, ZIP archive, etc. | +| mlm:container | [Asset Object][stac-asset] | **RECOMMENDED** Information to run the model in a container with URI to the container. | +| mlm:training | [Asset Object][stac-asset] | **RECOMMENDED** Information to run the training pipeline of the model being described. | +| mlm:inference | [Asset Object][stac-asset] | **RECOMMENDED** Information to run the inference pipeline of the model being described. | + +It is recommended that the [Assets][stac-asset] defined in a STAC Item using MLM extension use the above field property +names for nesting the Assets in order to improve their quick identification, although the specific names employed are +left up to user preference. However, the MLM Asset definitions **MUST** include the +appropriate [MLM Asset Roles](#mlm-asset-roles) to ensure their discovery. + +### MLM Asset Roles + +Asset `roles` should include relevant names that describe them. This does not only include +the [Recommended Asset Roles](https://github.com/radiantearth/stac-spec/blob/master/item-spec/item-spec.md#asset-roles) +from the core specification, such as `data` or `metadata`, but also descriptors such as `mlm:model`, `mlm:weights` and +so on, as applicable for the relevant [MLM Asset](#mlm-assets) being described. Please refer to the following sections +for `roles` requirements by specific [MLM Asset](#mlm-assets). + +Note that `mlm:` prefixed roles are used for identification purpose of the Assets, but non-prefixed roles can be +provided as well to offer generic descriptors. 
For example, `["mlm:model", "model", "data"]` could be considered for +the [Model Asset](#model-asset). + +In order to provide more context, the following roles are also recommended were applicable: + +| Asset Role | Additional Roles | Description | +|---------------------------|-------------------------|------------------------------------------------------------------------------------------| +| mlm:inference-runtime (*) | `runtime` | Describes an Asset that provides runtime reference to perform model inference. | +| mlm:training-runtime (*) | `runtime` | Describes an Asset that provides runtime reference to perform model training. | +| mlm:checkpoint (*) | `weights`, `checkpoint` | Describes an Asset that provides a model checkpoint with embedded model configurations. | +| mlm:weights | `weights`, `checkpoint` | Describes an Asset that provides a model weights (typically some Tensor representation). | +| mlm:model | `model` | Required role for [Model Asset](#model-asset). | +| mlm:source_code | `code` | Required role for [Model Asset](#source-code-asset). | + +> [!NOTE] +> (*) These roles are offered as direct conversions from the previous extension +> that provided [ML-Model Asset Roles][ml-model-asset-roles] to provide easier upgrade to the MLM extension. + +[ml-model-asset-roles]: https://github.com/stac-extensions/ml-model?tab=readme-ov-file#asset-objects + + +### Model Asset + +| Field Name | Type | Description | +|-------------------|-------------------------------------------|--------------------------------------------------------------------------------------------------| +| title | string | Description of the model asset. | +| href | string | URI to the model artifact. | +| type | string | The media type of the artifact (see [Model Artifact Media-Type](#model-artifact-media-type). | +| roles | [string] | **REQUIRED** Specify `mlm:model`. Can include `["mlm:weights", "mlm:checkpoint"]` as applicable. 
|
+| mlm:artifact_type | [Artifact Type Enum](#artifact-type-enum) | Specifies the kind of model artifact. Typically related to a particular ML framework.            |
+
+Recommended Asset `roles` include `mlm:weights` or `mlm:checkpoint` for model weights that need to be loaded by a
+model definition and `mlm:compiled` for models that can be loaded directly without an intermediate model definition.
+In each case, the `mlm:model` should be applied as well to indicate that this asset represents the model.
+
+It is also recommended to make use of the
+[file](https://github.com/stac-extensions/file?tab=readme-ov-file#asset--link-object-fields)
+extension for this Asset, as it can provide useful information to validate the contents of the model definition,
+by comparison with fields `file:checksum` and `file:size` for example.
+
+#### Model Artifact Media-Type
+
+Not all ML frameworks, libraries or model artifacts provide an explicit media-type. When those are not provided, custom
+media-types can be considered. For example `application/x-pytorch` or `application/octet-stream; application=pytorch`
+could be appropriate to represent a PyTorch `.pt` file, since the underlying format is a serialized pickle structure.
+
+#### Artifact Type Enum
+
+This value can be used to provide additional details about the specific model artifact being described.
+For example, PyTorch offers various strategies for providing model definitions, such as Pickle (`.pt`), TorchScript,
+or the compiled approach. Since they all refer to the same ML framework,
+the [Model Artifact Media-Type](#model-artifact-media-type) would be insufficient in this case to detect which strategy
+should be used.
+
+Following are some proposed *Artifact Type* values for corresponding approaches, but other names are
+permitted as well. Note that the names are selected using the framework-specific definitions to help
+the users understand the source explicitly, although this is not strictly required either.
+
+| Artifact Type      | Description                                                                                                              |
+|--------------------|--------------------------------------------------------------------------------------------------------------------------|
+| `torch.compile`    | A model artifact obtained by [`torch.compile`](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html).  |
+| `torch.jit.script` | A model artifact obtained by [`TorchScript`](https://pytorch.org/docs/stable/jit.html).                                  |
+| `torch.save`       | A model artifact saved by [Serialized Pickle Object](https://pytorch.org/tutorials/beginner/saving_loading_models.html). |
+
+### Source Code Asset
+
+| Field Name     | Type     | Description                                                                   |
+|----------------|----------|-------------------------------------------------------------------------------|
+| title          | string   | Title of the source code.                                                     |
+| href           | string   | URI to the code repository, a ZIP archive, or an individual code/script file. |
+| type           | string   | Media-type of the URI.                                                        |
+| roles          | [string] | **RECOMMENDED** Specify one or more of `["model", "code", "metadata"]`        |
+| description    | string   | Description of the source code.                                               |
+| mlm:entrypoint | string   | Specific entrypoint reference in the code to use for running model inference. |
+
+If the referenced code does not directly offer a callable script to run the model, the `mlm:entrypoint` field should be
+added to the [Asset Object][stac-asset] in order to provide a pointer to the inference function to execute the model.
+For example, `my_package.my_module:predict` would refer to the `predict` function located in the `my_module` inside the
+`my_package` library provided by the repository.
+
+It is strongly recommended to use a specific media-type such as `text/x-python` if the source code refers directly
+to a script of a known programming language. Using the HTML rendering of that source file, such as through GitHub
+for example, should be avoided. Using the "Raw Contents" endpoint for such cases is preferable.
+The `text/html` media-type should be reserved for cases where the URI generally points at a Git repository. +Note that the URI including the specific commit hash, release number or target branch should be preferred over +other means of referring to checkout procedures, although this specification does not prohibit the use of additional +properties to better describe the Asset. + +Recommended asset `roles` include `code` and `metadata`, +since the source code asset might also refer to more detailed metadata than this specification captures. ### Container Asset -| Field Name | Type | Description | -|-------------|----------|-------------------------------------------------------| -| title | string | Description of the container. | -| href | string | Url of the container file (Dockerfile). | -| type | string | "application/vnd.oci.image.index.v1+json" | -| roles | [string] | Specify ["runtime"] and any other custom roles. | -| working_dir | string | Working directory in the instance that can be mapped. | -| run | string | Running command. | +| Field Name | Type | Description | +|-------------|----------|-----------------------------------------------------------------------------------| +| title | string | Description of the container. | +| href | string | URI of the published container, including the container registry, image and tag. | +| type | string | Media-type of the container, typically `application/vnd.oci.image.index.v1+json`. | +| roles | [string] | Specify `["runtime"]` and any other custom roles. | -If you're unsure how to containerize your model, we suggest starting from the latest official container image for your framework that works with your model and pinning the container version. +If you're unsure how to containerize your model, we suggest starting from the latest official container image for +your framework that works with your model and pinning the container version. 
Examples: [Pytorch Dockerhub](https://hub.docker.com/r/pytorch/pytorch/tags) @@ -155,7 +257,6 @@ Examples: Using a base image for a framework looks like - ```dockerfile # In your Dockerfile, pull the latest base image with all framework dependencies including accelerator drivers FROM pytorch/pytorch:2.1.2-cuda11.8-cudnn8-runtime @@ -171,30 +272,56 @@ You can also use other base images. Pytorch and Tensorflow offer docker images f #### Bands and Statistics -We use the [STAC 1.1 Bands Object](https://github.com/radiantearth/stac-spec/pull/1254) for representing bands information, including the nodata value, data type, and common band names. Only bands used to train or fine tune the model should be included in this `bands` field. +Depending on the supported `stac_version` and other `stac_extensions` employed by the STAC Item using MLM, +the [STAC 1.1 - Band Object][stac-1.1-band], +the [STAC Raster - Band Object][stac-raster-band] or +the [STAC EO - Band Object][stac-eo-band] can be used for +representing bands information, including notably the `nodata` value, +the `data_type` (see also [Data Type Enum](#data-type-enum)), +and [Common Band Names][stac-band-names]. + +Only bands used as input to the model should be included in the MLM `bands` field. +To avoid duplicating the information, MLM only uses the `name` of whichever "Band Object" is defined in the STAC Item. + +One distinction from the [STAC 1.1 - Band Object][stac-1.1-band] in MLM is that [Statistics][stac-1.1-stats] object +(or the corresponding [STAC Raster - Statistics][stac-raster-stats] for STAC 1.0) are not +defined at the "Band Object" level, but at the [Model Input](#model-input-object) level. +This is because, in machine learning, it is common to need overall statistics for the dataset used to train the model +to normalize all bands, rather than normalizing the values over a single product. 
Furthermore, statistics could be +applied differently for distinct [Model Input](#model-input-object) definitions, in order to adjust for intrinsic +properties of the model. + +[stac-1.1-band]: https://github.com/radiantearth/stac-spec/pull/1254 +[stac-1.1-stats]: https://github.com/radiantearth/stac-spec/pull/1254/files#diff-2477b726f8c5d5d1c8b391be056db325e6918e78a24b414ccd757c7fbd574079R294 +[stac-eo-band]: https://github.com/stac-extensions/eo?tab=readme-ov-file#band-object +[stac-raster-band]: https://github.com/stac-extensions/raster?tab=readme-ov-file#raster-band-object +[stac-raster-stats]: https://github.com/stac-extensions/raster?tab=readme-ov-file#statistics-object +[stac-band-names]: https://github.com/stac-extensions/eo?tab=readme-ov-file#common-band-names -A deviation from the [STAC 1.1 Bands Object](https://github.com/radiantearth/stac-spec/pull/1254) is that we do not include the [Statistics](stac-statistics) object at the band object level, but at the Model Input level. This is because in machine learning, it is common to only need overall statistics for the dataset used to train the model to normalize all bands. +#### Data Type Enum -[stac-statistics]: https://github.com/radiantearth/stac-spec/pull/1254/files#diff-2477b726f8c5d5d1c8b391be056db325e6918e78a24b414ccd757c7fbd574079R294 +When describing the `data_type` provided by a [Band](#bands-and-statistics), whether for defining +the [Input Structure](#input-structure-object) or the [Result Structure](#result-structure-object), +the [Data Types from the STAC Raster extension][raster-data-types] should be used. 
-#### Array Object +[raster-data-types]: https://github.com/stac-extensions/raster?tab=readme-ov-file#data-types -| Field Name | Type | Description | | -| ---------- | --------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --- | -| shape | [integer] | **REQUIRED.** Shape of the input n-dimensional array ($N \times C \times H \times W$), including the batch size dimension. The batch size dimension must either be greater than 0 or -1 to indicate an unspecified batch dimension size. | | -| dim_order | string | **REQUIRED.** How the above dimensions are ordered within the `shape`. `bhw`, `bchw`, `bthw`, `btchw` are valid orderings where `b`=batch, `c`=channel, `t`=time, `h`=height, w=width. | | -| data_type | enum | **REQUIRED.** The data type of values in the n-dimensional array. For model inputs, this should be the data type of the processed input supplied to the model inference function, not the data type of the source bands. Use one of the [common metadata data types](https://github.com/stac-extensions/raster?tab=readme-ov-file#data-types). | | +#### Input Structure Object -Note: It is common in the machine learning, computer vision, and remote sensing communities to refer to rasters that are inputs to a model as arrays or tensors. Array Objects are distinct from the JSON array type used to represent lists of values. 
+| Field Name | Type | Description | +|------------|-----------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| shape | [integer] | **REQUIRED** Shape of the input n-dimensional array ($N \times C \times H \times W$), including the batch size dimension. The batch size dimension must either be greater than 0 or -1 to indicate an unspecified batch dimension size. | +| dim_order | string | **REQUIRED** How the above dimensions are ordered within the `shape`. `bhw`, `bchw`, `bthw`, `btchw` are valid orderings where `b`=batch, `c`=channel, `t`=time, `h`=height, `w`=width. | +| data_type | [Data Type Enum](#data-type-enum) | **REQUIRED** The data type of values in the n-dimensional array. For model inputs, this should be the data type of the processed input supplied to the model inference function, not the data type of the source bands. | ### Model Output Object -| Field Name | Type | Description | -|--------------------------| --------------------------------------------- |--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| tasks | [[Task Enum](#task-enum)] | **REQUIRED.** Specifies the Machine Learning tasks for which the output can be used for. This can be a subset of `mlm:tasks` defined under the Item `properties` as applicable. | -| result_array | [[Result Array Object](#result-array-object)] | The list of output arrays/tensors from the model. | -| classification:classes | [[Class Object](#class-object)] | A list of class objects adhering to the [Classification extension](https://github.com/stac-extensions/classification). 
| -| post_processing_function | string | A url to the postprocessing function where normalization, rescaling, and other operations take place.. Or, instead, the function code path, for example: `my_package.my_module.my_processing_function` | +| Field Name | Type | Description | +|--------------------------|-----------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| tasks | [[Task Enum](#task-enum)] | **REQUIRED** Specifies the Machine Learning tasks for which the output can be used for. This can be a subset of `mlm:tasks` defined under the Item `properties` as applicable. | +| result | [Result Structure Object](#result-structure-object) | The structure that describes the resulting output arrays/tensors from one model head. | +| classification:classes | [[Class Object](#class-object)] | A list of class objects adhering to the [Classification extension](https://github.com/stac-extensions/classification). | +| post_processing_function | string | A url to the postprocessing function where normalization, rescaling, and other operations take place.. Or, instead, the function code path, for example: `my_package.my_module.my_processing_function` | While only `task` is a required field, all fields are recommended for supervised tasks that produce a fixed shape tensor and have output classes. @@ -202,47 +329,55 @@ While only `task` is a required field, all fields are recommended for supervised #### Task Enum -It is recommended to define `mlm:tasks` of the entire model and `tasks` of [Model Output Object](#model-output-object) -with the following values. 
Although other values are permitted, they should be used sparingly to allow better +It is recommended to define `mlm:tasks` of the entire model at the STAC Item level, +and `tasks` of respective [Model Output Object](#model-output-object) with the following values. +Although other values are permitted to support more use cases, they should be used sparingly to allow better interoperability of models and their representation. -| Task Name | Corresponding `label:tasks` | Description | -|-------------------------|------------------------------|----------------------------------------------------------------------------------------| -| `regression` | `regression` | Generic regression that estimates a numeric value. | -| `classification` | `classification` | Generic classification task that assigns class labels to an output. | -| `scene-classification` | *n/a* | -| `detection` | `detection` | Generic detection of the "presence" of objects or entities, with or without positions. | -| `object-detection` | *n/a* | -| `segmentation` | *n/a* | | -| `semantic-segmentation` | *n/a* | -| `instance-segmentation` | *n/a* | -| `panoptic-segmentation` | *n/a* | -| `similarity-search` | *n/a* | -| `image-captioning` | *n/a* | -| `generative` | *n/a* | -| `super-resolution` | *n/a* | +As a general rule of thumb, if a task is not represented below, an appropriate name can be formulated by taking +definitions listed in [Papers With Code](https://paperswithcode.com/sota). The names +should be normalized to lowercase and use hyphens instead of spaces. + +| Task Name | Corresponding `label:tasks` | Description | +|-------------------------|-----------------------------|-----------------------------------------------------------------------------------------------------------------| +| `regression` | `regression` | Generic regression that estimates a numeric and continuous value. | +| `classification` | `classification` | Generic classification task that assigns class labels to an output. 
| +| `scene-classification` | *n/a* | Specific classification task where the model assigns a single class label to an entire scene/area. | +| `detection` | `detection` | Generic detection of the "presence" of objects or entities, with or without positions. | +| `object-detection` | *n/a* | Task corresponding to the identification of positions as bounding boxes of object detected in the scene. | +| `segmentation` | `segmentation` | Generic tasks that regroups all types of segmentations tasks consisting of applying labels to pixels. | +| `semantic-segmentation` | *n/a* | Specific segmentation task where all pixels are attributed labels, without consideration of similar instances. | +| `instance-segmentation` | *n/a* | Specific segmentation task that assigns distinct labels for groups of pixels corresponding to object instances. | +| `panoptic-segmentation` | *n/a* | Specific segmentation task that combines instance segmentation of objects and semantic labels for non-objects. | +| `similarity-search` | *n/a* | Generic task to identify whether a query input corresponds to another reference within a corpus. | +| `image-captioning` | *n/a* | Specific task of describing the content of an image in words. | +| `generative` | *n/a* | Generic task that encompasses all synthetic data generation techniques. | +| `super-resolution` | *n/a* | Specific task that increases the quality and resolution of an image by increasing its high-frequency details. | If the task falls within the category of supervised machine learning and uses labels during training, this should align with the `label:tasks` values defined in [STAC Label Extension][stac-ext-label-props] for relevant STAC Collections and Items published with the model described by this extension. -It is to be noted that multiple "generic" tasks names (`classification`, `detection`, etc.) are defined to allow +It is to be noted that multiple "*generic*" tasks names (`classification`, `detection`, etc.) 
are defined to allow correspondance with `label:tasks`, but these can lead to some ambiguity depending on context. For example, a model that supports `classification` could mean that the model can predict patch-based classes over an entire scene (i.e.: `scene-classification` for a single prediction over an entire area of interest as a whole), -or that it can predict pixel-wise classification (i.e.: `pixel-classification`), such as land-cover labels for -every single pixel coordinate over the area of interest. To avoid this kind of ambiguity, `tasks` should always aim -to provide the most specific definitions possible to explicitly describe the model. +or that it can predict pixel-wise "classifications", such as land-cover labels for +every single pixel coordinate over the area of interest. Maybe counter-intuitively to some users, +such a model that produces pixel-wise "classifications" should be attributed the `segmentation` task +(and more specifically `semantic-segmentation`) rather than `classification`. To avoid this kind of ambiguity, +it is strongly recommended that `tasks` always aim to provide the most specific definitions possible to explicitly +describe what the model accomplishes. [stac-ext-label-props]: https://github.com/stac-extensions/label#item-properties -#### Result Array Object +#### Result Structure Object -| Field Name | Type | Description | -|------------|-----------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| shape | [integer] | **REQUIRED.** Shape of the n-dimensional result array ($N \times H \times W$), possibly including a batch size dimension. 
The batch size dimension must either be greater than 0 or -1 to indicate an unspecified batch dimension size. | -| dim_names | [string] | **REQUIRED.** The names of the above dimensions of the result array, ordered the same as this object's `shape` field. | -| data_type | enum | **REQUIRED.** The data type of values in the n-dimensional array. For model outputs, this should be the data type of the result of the model inference without extra post processing. Use one of the [common metadata data types](https://github.com/radiantearth/stac-spec/blob/f9b3c59ba810541c9da70c5f8d39635f8cba7bcd/item-spec/common-metadata.md#data-types). | +| Field Name | Type | Description | +|------------|-----------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| shape | [integer] | **REQUIRED** Shape of the n-dimensional result array ($N \times H \times W$), possibly including a batch size dimension. The batch size dimension must either be greater than 0 or -1 to indicate an unspecified batch dimension size. | +| dim_names | [string] | **REQUIRED** The names of the above dimensions of the result array, ordered the same as this object's `shape` field. | +| data_type | [Data Type Enum](#data-type-enum) | **REQUIRED** The data type of values in the n-dimensional array. For model outputs, this should be the data type of the result of the model inference without extra post processing. | #### Class Object @@ -254,8 +389,8 @@ See the documentation for the The following types should be used as applicable `rel` types in the [Link Object](https://github.com/radiantearth/stac-spec/tree/master/item-spec/item-spec.md#link-object) of STAC Items describing Band Assets used with a model. 
-| Type | Description | -|--------------|----------------------------------------------------------------------------------------------------------------------------| +| Type | Description | +|--------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | derived_from | This link points to _item.json or _collection.json. Replace with the unique [`mlm:name`](#item-properties-and-collection-fields) field's value. | ## Contributing From c79ea01d69f999ff3c13c7a10257a017406a0947 Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Fri, 29 Mar 2024 18:13:02 -0400 Subject: [PATCH 069/112] add best practice details --- best-practices.md | 105 +++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 95 insertions(+), 10 deletions(-) diff --git a/best-practices.md b/best-practices.md index 9450d69..e82d089 100644 --- a/best-practices.md +++ b/best-practices.md @@ -1,30 +1,115 @@ # ML Model Extension Best Practices -This document makes a number of recommendations for creating real world ML Model Extensions. None of them are required to meet the core specification, but following these practices will improve the documentation of your model and make life easier for client tooling and users. They come about from practical experience of implementors and introduce a bit more 'constraint' for those who are creating STAC objects representing their models or creating tools to work with STAC. +This document makes a number of recommendations for creating real world ML Model Extensions. +None of them are required to meet the core specification, but following these practices will improve the documentation +of your model and make life easier for client tooling and users. 
They come about from practical experience of +implementors and introduce a bit more 'constraint' for those who are creating STAC objects representing their +models or creating tools to work with STAC. ## Using STAC Common Metadata Fields for the ML Model Extension -We recommend using the `start_datetime` and `end_datetime`, `geometry`, and `bbox` to represent the recommended context of data the model was trained with and for which the model should have appropriate domain knowledge for inference. For example, we can consider a model which is trained on imagery from all over the world and is robust enough to be applied to any time period. In this case, the common metadata to use with the model would include the bbox of "the world" `[-90, -180, 90, 180]` and the start_datetime and end_datetime range could be generic values like `["1900-01-01", null]`. +It is recommended to use the `start_datetime` and `end_datetime`, `geometry`, and `bbox` to represent the +recommended context of data the model was trained with and for which the model should have appropriate domain +knowledge for inference. For example, we can consider a model which is trained on imagery from all over the world +and is robust enough to be applied to any time period. In this case, the common metadata to use with the model +would include the bbox of "the world" `[-90, -180, 90, 180]` and the `start_datetime` and `end_datetime` range could +be generic values like `["1900-01-01", null]`. ## Recommended Extensions to Compose with the ML Model Extension ### Processing Extension -We recommend using at least the `processing:lineage` and `processing:level` fields from the [Processing Extension](https://github.com/stac-extensions/processing) to make it clear how [Model Input Objects](./README.md#model-input-object) are processed by the data provider prior to an inference preprocessing pipeline. 
This can help users locate the correct version of the dataset used during model inference or help them reproduce the data processing pipeline. +It is recommended to use at least the `processing:lineage` and `processing:level` fields from +the [Processing Extension](https://github.com/stac-extensions/processing) to make it clear +how [Model Input Objects](./README.md#model-input-object) are processed by the data provider prior to an +inference preprocessing pipeline. This can help users locate the correct version of the dataset used during model +inference or help them reproduce the data processing pipeline. For example: ```json -"processing:lineage": "GRD Post Processing", -"processing:level": "L1C", -"processing:facility": "Copernicus S1 Core Ground Segment - DPA", -"processing:software": { +{ + "processing:lineage": "GRD Post Processing", + "processing:level": "L1C", + "processing:facility": "Copernicus S1 Core Ground Segment - DPA", + "processing:software": { "Sentinel-1 IPF": "002.71" + } } ``` -STAC Items or STAC Assets with asset properties resulting from the model inference should be annotated with [`processing:level = L4`](https://github.com/stac-extensions/processing?tab=readme-ov-file#suggested-processing-levels). +STAC Items or STAC Assets resulting from the model inference should be +annotated with [`processing:level = L4`](https://github.com/stac-extensions/processing?tab=readme-ov-file#suggested-processing-levels) +(as described below) to indicate that they correspond from the output of an ML model. -> Model output or results from analyses of lower level data (i.e.,variables that are not directly measured by the instruments, but are derived from these measurements) +> processing:level = L4
+> Model output or results from analyses of lower level data (i.e.: variables that are not directly measured by the instruments, but are derived from these measurements) -TODO provide other suggestions on extensions to compose with this one. STAC ML AOI, STAC Label, ... +Furthermore, the [`processing:expression`](https://github.com/stac-extensions/processing?tab=readme-ov-file#expression-object) +should be specified with a reference to the STAC Item employing the MLM extension to provide full context of the source +of the derived product. + +A potential representation of a STAC Asset could be as follows: +```json +{ + "model-output": { + "processing:level": "L4", + "processing:expression": { + "format": "stac-mlm", + "expression": "" + } + } +} +``` + +### ML-AOI and Label Extensions + +Supervised machine learning models will typically employ a dataset of training, validation and test samples. +If those samples happen to be represented by STAC Collections and Items annotated with +the [ML-AOI Extension](https://github.com/stac-extensions/ml-aoi), notably with the corresponding `ml-aoi:split` +and all their annotations with [Label Extension](https://github.com/stac-extensions/label) references, the STAC Item +that contains the MLM Extension should include those STAC Collections in its `links` listing in order +to provide direct references to the training dataset that was employed for creating the model. + +Providing dataset references would, in combination with the training pipeline contained under an +[MLM Asset Object](README.md#assets-objects) annotated by the `mlm:training-runtime` role, +allow users to retrain the model for validation, or with adaptations to improve it, eventually +leading to a new MLM STAC Item definition. 
+ +```json +{ + "id": "stac-item-model", + "stac_extensions": [ + "https://stac-extensions.github.io/mlm/v1.0.0/schema.json", + "https://stac-extensions.github.io/ml-aoi/v0.2.0/schema.json" + ], + "assets": { + "mlm:training": { + "title": "Model Training Pipeline", + "href": "docker.io/training/image:latest", + "type": "application/vnd.oci.image.index.v1+json", + "roles": ["mlm:training-runtime"] + } + }, + "links": [ + { + "rel": "derived_from", + "type": "application/json", + "href": "", + "ml-aoi:split": "train" + }, + { + "rel": "derived_from", + "type": "application/json", + "href": "", + "ml-aoi:split": "validate" + }, + { + "rel": "derived_from", + "type": "application/json", + "href": "", + "ml-aoi:split": "test" + } + ] +} +``` From 4d765c251fa785be5a9ec11427d03c4c6340e60c Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Fri, 29 Mar 2024 19:25:17 -0400 Subject: [PATCH 070/112] add yet again more best practices to integrate other STAC extensions --- best-practices.md | 76 ++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 75 insertions(+), 1 deletion(-) diff --git a/best-practices.md b/best-practices.md index e82d089..a9bb661 100644 --- a/best-practices.md +++ b/best-practices.md @@ -6,6 +6,15 @@ of your model and make life easier for client tooling and users. They come about implementors and introduce a bit more 'constraint' for those who are creating STAC objects representing their models or creating tools to work with STAC. 
+- [Using STAC Common Metadata Fields for the ML Model Extension](#using-stac-common-metadata-fields-for-the-ml-model-extension) +- [Recommended Extensions to Compose with the ML Model Extension](#recommended-extensions-to-compose-with-the-ml-model-extension) + - [Processing Extension](#processing-extension) + - [ML-AOI and Label Extensions](#ml-aoi-and-label-extensions) + - [Classification Extension](#classification-extension) + - [Scientific Extension](#scientific-extension) + - [File Extension](#file-extension) + - [Version Extension](#version-extension) + ## Using STAC Common Metadata Fields for the ML Model Extension It is recommended to use the `start_datetime` and `end_datetime`, `geometry`, and `bbox` to represent the @@ -74,7 +83,7 @@ to provide direct references to the training dataset that was employed for creat Providing dataset references would, in combination with the training pipeline contained under an [MLM Asset Object](README.md#assets-objects) annotated by the `mlm:training-runtime` role, allow users to retrain the model for validation, or with adaptations to improve it, eventually -leading to a new MLM STAC Item definition. +leading to a new MLM STAC Item definition (see also [STAC Version Extension](#version-extension)). ```json { @@ -113,3 +122,68 @@ leading to a new MLM STAC Item definition. ] } ``` + +### Classification Extension + +Since it is expected that a model will provide some kind of classification values as output, the +[Classification Extension](https://github.com/stac-extensions/classification) can be leveraged inside +MLM definition to indicate which class values can be contained in the resulting output from the model prediction. + +For more details, see the [Model Output Object](README.md#model-output-object) definition. 
+ +### Scientific Extension + +Provided that most models derive from previous scientific work, it is strongly recommended to employ the +[Scientific Extension](https://github.com/stac-extensions/scientific) to provide references corresponding to the +original source of the model (`sci:doi`, `sci:citation`). This can help users find more information about the model, +its underlying architecture, or ways to improve it by piecing together the related work (`sci:publications`) that +lead to its creation. + +This extension can also be used for the purpose of publishing new models, by providing to users the necessary details +regarding how they should cite its use (i.e.: `sci:citation` field and `cite-as` relation type). + +### Version Extension + +In the even that a model is retrained with gradually added annotations or improved training strategies leading to +better performances, the existing model and newer models represented by STAC Items with MLM should also make use of +the [Version Extension](https://github.com/stac-extensions/version). Using the fields and link relation types defined +by this extension, the retraining cycle of the model can better be described, with a full history of the newer versions +developed. + +Additionally, the `version:experimental` field should be considered for models being trained and still under evaluation +before widespread deployment. This can be particularly useful for annotating models experiments during cross-validation +training process to find the "best model". This field could also be used to indicate if a model is provided for +educational purposes only. + +### File Extension + +In order to provide a reliable and reproducible machine learning pipeline, external references to data required by the +model should employ the [file](https://github.com/stac-extensions/file?tab=readme-ov-file#asset--link-object-fields) to +validate that they are properly retrieved for inference. 
+ +One of the most typical case is the definition of an external file reference to model weights, often stored on a +Git LFS or S3 bucket due to their size. Providing the `file:checksum` and `file:size` for this file can help ensure +that the model is properly instantiated from the expected weights, or that sufficient storage is allocated to run it. + +```json +{ + "stac_extensions": [ + "https://stac-extensions.github.io/mlm/v1.0.0/schema.json", + "https://stac-extensions.github.io/file/v2.1.0/schema.json" + ], + "assets": { + "model": { + "type": "application/x-pytorch", + "href": "", + "roles": [ + "mlm:model", + "mlm:weights", + "data" + ], + "file:size": 123456789, + "file:checksum": "12209f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08", + "mlm:artifact_type": "torch.save" + } + } +} +``` From 4db3b947a4289f7138863266ea09cf3938629cb7 Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Fri, 29 Mar 2024 20:06:25 -0400 Subject: [PATCH 071/112] more best practices (relates to https://github.com/stac-extensions/classification/issues/48 and https://github.com/stac-extensions/example-links/issues/4) --- README.md | 6 +++++- best-practices.md | 49 ++++++++++++++++++++++++++++++++++------------- 2 files changed, 41 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index 6873633..034b916 100644 --- a/README.md +++ b/README.md @@ -70,7 +70,7 @@ extension to synthesize common use cases into a single reference for Machine Lea | mlm:total_parameters | integer | Total number of model parameters, including trainable and non-trainable parameters. | | mlm:pretrained_source | string | The source of the pretraining. Can refer to popular pretraining datasets by name (i.e. Imagenet) or less known datasets by URL and description. | | mlm:summary | string | Text summary of the model and it's purpose. | -| batch_size_suggestion | number | A suggested batch size for the accelerator and summarized hardware. 
| +| mlm:batch_size_suggestion | number | A suggested batch size for the accelerator and summarized hardware. | In addition, fields from the following extensions must be imported in the item: - [Scientific Extension Specification][stac-ext-sci] to describe relevant publications. @@ -233,6 +233,10 @@ Note that the URI including the specific commit hash, release number or target b other means of referring to checkout procedures, although this specification does not prohibit the use of additional properties to better describe the Asset. +Since the source code of a model provides useful example on how to use it, it is also recommended to define relevant +references to documentation using the `example` extension. +See the [Best Practices - Example Extension](best-practices.md#example-extension) section for more details. + Recommended asset `roles` include `code` and `metadata`, since the source code asset might also refer to more detailed metadata than this specification captures. diff --git a/best-practices.md b/best-practices.md index a9bb661..691d7a9 100644 --- a/best-practices.md +++ b/best-practices.md @@ -13,6 +13,7 @@ models or creating tools to work with STAC. - [Classification Extension](#classification-extension) - [Scientific Extension](#scientific-extension) - [File Extension](#file-extension) + - [Example Extension](#example-extension) - [Version Extension](#version-extension) ## Using STAC Common Metadata Fields for the ML Model Extension @@ -131,6 +132,9 @@ MLM definition to indicate which class values can be contained in the resulting For more details, see the [Model Output Object](README.md#model-output-object) definition. +> [!NOTE] +> Update according to https://github.com/stac-extensions/classification/issues/48 + ### Scientific Extension Provided that most models derive from previous scientific work, it is strongly recommended to employ the @@ -142,19 +146,6 @@ lead to its creation. 
This extension can also be used for the purpose of publishing new models, by providing to users the necessary details regarding how they should cite its use (i.e.: `sci:citation` field and `cite-as` relation type). -### Version Extension - -In the even that a model is retrained with gradually added annotations or improved training strategies leading to -better performances, the existing model and newer models represented by STAC Items with MLM should also make use of -the [Version Extension](https://github.com/stac-extensions/version). Using the fields and link relation types defined -by this extension, the retraining cycle of the model can better be described, with a full history of the newer versions -developed. - -Additionally, the `version:experimental` field should be considered for models being trained and still under evaluation -before widespread deployment. This can be particularly useful for annotating models experiments during cross-validation -training process to find the "best model". This field could also be used to indicate if a model is provided for -educational purposes only. - ### File Extension In order to provide a reliable and reproducible machine learning pipeline, external references to data required by the @@ -187,3 +178,35 @@ that the model is properly instantiated from the expected weights, or that suffi } } ``` + +### Example Extension + +In order to help users understand how to apply and run the described machine learning model, +the [Example Extension](https://github.com/stac-extensions/example-links#fields) can be used to provide code examples +demonstrating how it can be applied. + +For example, a [Model Card on Hugging Face](https://huggingface.co/docs/hub/en/model-cards) +is often provided (see [Hugging Face Model examples](https://huggingface.co/models)) to describe the model, which +can embed sample code and references to more details about the model. 
This kind of reference should be added under
+the `links` of the STAC Item using MLM.
+
+Typically, a STAC Item using the MLM extension to describe the training or
+inference strategies to apply a model should define the [Source Code Asset](README.md#source-code-asset).
+This code is in itself ideal to guide users how to run it, and should therefore be replicated as an `example` link
+reference to offer more code samples to execute the model.
+
+> [!NOTE]
+> Update according to https://github.com/stac-extensions/example-links/issues/4
+
+### Version Extension
+
+In the event that a model is retrained with gradually added annotations or improved training strategies leading to
+better performance, the existing model and newer models represented by STAC Items with MLM should also make use of
+the [Version Extension](https://github.com/stac-extensions/version). Using the fields and link relation types defined
+by this extension, the retraining cycle of the model can better be described, with a full history of the newer versions
+developed.
+
+Additionally, the `version:experimental` field should be considered for models being trained and still under evaluation
+before widespread deployment. This can be particularly useful for annotating model experiments during the cross-validation
+training process to find the "best model". This field could also be used to indicate if a model is provided for
From 669c9a35ad93be4594ae3eb410cf4b5919bf9023 Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Fri, 29 Mar 2024 20:23:26 -0400 Subject: [PATCH 072/112] adjustments from PR review --- README.md | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index 034b916..86882a4 100644 --- a/README.md +++ b/README.md @@ -60,16 +60,15 @@ extension to synthesize common use cases into a single reference for Machine Lea | mlm:tasks | [[Task Enum](#task-enum)] | **REQUIRED** Specifies the Machine Learning tasks for which the model can be used for. If multi-tasks outputs are provided by distinct model heads, specify all available tasks under the main properties and specify respective tasks in each [Model Output Object](#model-output-object). | | mlm:framework | string | **REQUIRED** Framework used to train the model (ex: PyTorch, TensorFlow). | | mlm:framework_version | string | **REQUIRED** The `framework` library version. Some models require a specific version of the machine learning `framework` to run. | -| mlm:file_size | integer | **REQUIRED** The size on disk of the model artifact (bytes). | | mlm:memory_size | integer | **REQUIRED** The in-memory size of the model on the accelerator during inference (bytes). | | mlm:input | [[Model Input Object](#model-input-object)] | **REQUIRED** Describes the transformation between the EO data and the model input. | | mlm:output | [[Model Output Object](#model-output-object)] | **REQUIRED** Describes each model output and how to interpret it. | -| mlm:accelerator | [Accelerator Enum](#accelerator-enum) | **REQUIRED** The intended computational hardware that runs inference. | -| mlm:accelerator_constrained | boolean | **REQUIRED** True if the intended `accelerator` is the only `accelerator` that can run inference. False if other accelerators, such as amd64 (CPU), can run inference. 
| -| mlm:hardware_summary | string | **REQUIRED** A high level description of the number of accelerators, specific generation of the `accelerator`, or other relevant inference details. | +| mlm:accelerator | [Accelerator Enum](#accelerator-enum) | The intended computational hardware that runs inference. If undefined, it should be assumed `amd64` (i.e.: CPU). | +| mlm:accelerator_constrained | boolean | Indicates if the intended `accelerator` is the only `accelerator` that can run inference. If undefined, it should be assumed `false`. | +| mlm:accelerator_summary | string | A high level description of the `accelerator`, such as its specific generation, or other relevant inference details. | +| mlm:accelerator_count | integer | A minimum amount of `accelerator` instances required to run the model. | | mlm:total_parameters | integer | Total number of model parameters, including trainable and non-trainable parameters. | -| mlm:pretrained_source | string | The source of the pretraining. Can refer to popular pretraining datasets by name (i.e. Imagenet) or less known datasets by URL and description. | -| mlm:summary | string | Text summary of the model and it's purpose. | +| mlm:pretrained_source | string \| null | The source of the pretraining. Can refer to popular pretraining datasets by name (i.e. Imagenet) or less known datasets by URL and description. If trained from scratch, the `null` value should be set explicitly. | | mlm:batch_size_suggestion | number | A suggested batch size for the accelerator and summarized hardware. | In addition, fields from the following extensions must be imported in the item: @@ -106,21 +105,18 @@ Fields that accept the `null` value can be considered `null` when omitted entire However, setting `null` explicitly when this information is known by the model provider can help users understand what is the expected behavior of the model. It is therefore recommended to provide `null` explicitly when applicable. 
-### Accelerator Enum +### Accelerator Type Enum It is recommended to define `accelerator` with one of the following values: - `amd64` models compatible with AMD or Intel CPUs (no hardware specific optimizations) - `cuda` models compatible with NVIDIA GPUs -- `xla` models compiled with XLA. models trained on TPUs are typically compiled with XLA. +- `xla` models compiled with XLA. Models trained on TPUs are typically compiled with XLA. - `amd-rocm` models trained on AMD GPUs - `intel-ipex-cpu` for models optimized with IPEX for Intel CPUs - `intel-ipex-gpu` for models optimized with IPEX for Intel GPUs - `macos-arm` for models trained on Apple Silicon -[stac-asset]: https://github.com/radiantearth/stac-spec/blob/master/collection-spec/collection-spec.md#asset-object - - ## Assets Objects | Field Name | Type | Description | @@ -136,6 +132,8 @@ names for nesting the Assets in order to improve their quick identification, alt left up to user preference. However, the MLM Asset definitions **MUST** include the appropriate [MLM Asset Roles](#mlm-asset-roles) to ensure their discovery. +[stac-asset]: https://github.com/radiantearth/stac-spec/blob/master/collection-spec/collection-spec.md#asset-object + ### MLM Asset Roles Asset `roles` should include relevant names that describe them. 
This does not only include From edcc8a2bfba1c64b7b5d4f5c47e971168ef26841 Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Fri, 29 Mar 2024 20:51:34 -0400 Subject: [PATCH 073/112] add more mlm:accelerator details (relates to https://github.com/crim-ca/dlm-extension/pull/2\#discussion_r1538309152) --- README.md | 132 +++++++++++++++++++++++++++++------------------------- 1 file changed, 72 insertions(+), 60 deletions(-) diff --git a/README.md b/README.md index 86882a4..12bf7d9 100644 --- a/README.md +++ b/README.md @@ -59,17 +59,17 @@ extension to synthesize common use cases into a single reference for Machine Lea | mlm:architecture | [Model Architecture](#model-architecture) string | **REQUIRED** A generic and well established architecture name of the model. | | mlm:tasks | [[Task Enum](#task-enum)] | **REQUIRED** Specifies the Machine Learning tasks for which the model can be used for. If multi-tasks outputs are provided by distinct model heads, specify all available tasks under the main properties and specify respective tasks in each [Model Output Object](#model-output-object). | | mlm:framework | string | **REQUIRED** Framework used to train the model (ex: PyTorch, TensorFlow). | -| mlm:framework_version | string | **REQUIRED** The `framework` library version. Some models require a specific version of the machine learning `framework` to run. | +| mlm:framework_version | string | The `framework` library version. Some models require a specific version of the machine learning `framework` to run. | | mlm:memory_size | integer | **REQUIRED** The in-memory size of the model on the accelerator during inference (bytes). | -| mlm:input | [[Model Input Object](#model-input-object)] | **REQUIRED** Describes the transformation between the EO data and the model input. | -| mlm:output | [[Model Output Object](#model-output-object)] | **REQUIRED** Describes each model output and how to interpret it. 
| -| mlm:accelerator | [Accelerator Enum](#accelerator-enum) | The intended computational hardware that runs inference. If undefined, it should be assumed `amd64` (i.e.: CPU). | +| mlm:accelerator | [Accelerator Enum](#accelerator-enum) \| null | The intended computational hardware that runs inference. If undefined or set to `null` explicitly, the model does not require any specific accelerator. | | mlm:accelerator_constrained | boolean | Indicates if the intended `accelerator` is the only `accelerator` that can run inference. If undefined, it should be assumed `false`. | | mlm:accelerator_summary | string | A high level description of the `accelerator`, such as its specific generation, or other relevant inference details. | | mlm:accelerator_count | integer | A minimum amount of `accelerator` instances required to run the model. | | mlm:total_parameters | integer | Total number of model parameters, including trainable and non-trainable parameters. | | mlm:pretrained_source | string \| null | The source of the pretraining. Can refer to popular pretraining datasets by name (i.e. Imagenet) or less known datasets by URL and description. If trained from scratch, the `null` value should be set explicitly. | | mlm:batch_size_suggestion | number | A suggested batch size for the accelerator and summarized hardware. | +| mlm:input | [[Model Input Object](#model-input-object)] | **REQUIRED** Describes the transformation between the EO data and the model input. | +| mlm:output | [[Model Output Object](#model-output-object)] | **REQUIRED** Describes each model output and how to interpret it. | In addition, fields from the following extensions must be imported in the item: - [Scientific Extension Specification][stac-ext-sci] to describe relevant publications. 
@@ -87,6 +87,74 @@ Note that this field is not an explicit "Enum", and is used only as an indicator If no specific or predefined architecture can be associated with the described model, simply employ `unknown` or another custom name as deemed appropriate. +### Task Enum + +It is recommended to define `mlm:tasks` of the entire model at the STAC Item level, +and `tasks` of respective [Model Output Object](#model-output-object) with the following values. +Although other values are permitted to support more use cases, they should be used sparingly to allow better +interoperability of models and their representation. + +As a general rule of thumb, if a task is not represented below, an appropriate name can be formulated by taking +definitions listed in [Papers With Code](https://paperswithcode.com/sota). The names +should be normalized to lowercase and use hyphens instead of spaces. + +| Task Name | Corresponding `label:tasks` | Description | +|-------------------------|-----------------------------|-----------------------------------------------------------------------------------------------------------------| +| `regression` | `regression` | Generic regression that estimates a numeric and continuous value. | +| `classification` | `classification` | Generic classification task that assigns class labels to an output. | +| `scene-classification` | *n/a* | Specific classification task where the model assigns a single class label to an entire scene/area. | +| `detection` | `detection` | Generic detection of the "presence" of objects or entities, with or without positions. | +| `object-detection` | *n/a* | Task corresponding to the identification of positions as bounding boxes of object detected in the scene. | +| `segmentation` | `segmentation` | Generic tasks that regroups all types of segmentations tasks consisting of applying labels to pixels. 
| +| `semantic-segmentation` | *n/a* | Specific segmentation task where all pixels are attributed labels, without consideration of similar instances. | +| `instance-segmentation` | *n/a* | Specific segmentation task that assigns distinct labels for groups of pixels corresponding to object instances. | +| `panoptic-segmentation` | *n/a* | Specific segmentation task that combines instance segmentation of objects and semantic labels for non-objects. | +| `similarity-search` | *n/a* | Generic task to identify whether a query input corresponds to another reference within a corpus. | +| `image-captioning` | *n/a* | Specific task of describing the content of an image in words. | +| `generative` | *n/a* | Generic task that encompasses all synthetic data generation techniques. | +| `super-resolution` | *n/a* | Specific task that increases the quality and resolution of an image by increasing its high-frequency details. | + +If the task falls within the category of supervised machine learning and uses labels during training, +this should align with the `label:tasks` values defined in [STAC Label Extension][stac-ext-label-props] for relevant +STAC Collections and Items published with the model described by this extension. + +It is to be noted that multiple "*generic*" tasks names (`classification`, `detection`, etc.) are defined to allow +correspondance with `label:tasks`, but these can lead to some ambiguity depending on context. For example, a model +that supports `classification` could mean that the model can predict patch-based classes over an entire scene +(i.e.: `scene-classification` for a single prediction over an entire area of interest as a whole), +or that it can predict pixel-wise "classifications", such as land-cover labels for +every single pixel coordinate over the area of interest. 
Maybe counter-intuitively to some users, +such a model that produces pixel-wise "classifications" should be attributed the `segmentation` task +(and more specifically `semantic-segmentation`) rather than `classification`. To avoid this kind of ambiguity, +it is strongly recommended that `tasks` always aim to provide the most specific definitions possible to explicitly +describe what the model accomplishes. + +[stac-ext-label-props]: https://github.com/stac-extensions/label#item-properties + +### Accelerator Type Enum + +It is recommended to define `accelerator` with one of the following values: + +- `amd64` models compatible with AMD or Intel CPUs (no hardware specific optimizations) +- `cuda` models compatible with NVIDIA GPUs +- `xla` models compiled with XLA. Models trained on TPUs are typically compiled with XLA. +- `amd-rocm` models trained on AMD GPUs +- `intel-ipex-cpu` for models optimized with IPEX for Intel CPUs +- `intel-ipex-gpu` for models optimized with IPEX for Intel GPUs +- `macos-arm` for models trained on Apple Silicon + +> [!WARNING] +> If `mlm:accelerator = amd64`, this explicitly indicates that the model does not (and will not try to) use any +> accelerator, even if some are available from the runtime environment. This is to be distinguished from +> the value `mlm:accelerator = null`, which means that the model *could* make use of some accelerators if provided, +> but is not constrained by any specific one. To improve comprehension by users, it is recommended that any model +> using `mlm:accelerator = amd64` also set explicitly `mlm:accelerator_constrained = true` to illustrate that the +> model **WILL NOT** use accelerators, although the hardware resolution should be identical nonetheless. + +When `mlm:accelerator = null` is employed, the value of `mlm:accelerator_constrained` can be ignored, since even if +set to `true`, there would be no `accelerator` to constrain against. 
To avoid confusion, it is suggested to set the +`mlm:accelerator_constrained = false` or omit the field entirely in this case. + ### Model Input Object | Field Name | Type | Description | @@ -105,18 +173,6 @@ Fields that accept the `null` value can be considered `null` when omitted entire However, setting `null` explicitly when this information is known by the model provider can help users understand what is the expected behavior of the model. It is therefore recommended to provide `null` explicitly when applicable. -### Accelerator Type Enum - -It is recommended to define `accelerator` with one of the following values: - -- `amd64` models compatible with AMD or Intel CPUs (no hardware specific optimizations) -- `cuda` models compatible with NVIDIA GPUs -- `xla` models compiled with XLA. Models trained on TPUs are typically compiled with XLA. -- `amd-rocm` models trained on AMD GPUs -- `intel-ipex-cpu` for models optimized with IPEX for Intel CPUs -- `intel-ipex-gpu` for models optimized with IPEX for Intel GPUs -- `macos-arm` for models trained on Apple Silicon - ## Assets Objects | Field Name | Type | Description | @@ -329,50 +385,6 @@ the [Data Types from the STAC Raster extension][raster-data-types] should be use While only `task` is a required field, all fields are recommended for supervised tasks that produce a fixed shape tensor and have output classes. `image-captioning`, `multi-modal`, and `generative` tasks may not return fixed shape tensors or classes. -#### Task Enum - -It is recommended to define `mlm:tasks` of the entire model at the STAC Item level, -and `tasks` of respective [Model Output Object](#model-output-object) with the following values. -Although other values are permitted to support more use cases, they should be used sparingly to allow better -interoperability of models and their representation. 
- -As a general rule of thumb, if a task is not represented below, an appropriate name can be formulated by taking -definitions listed in [Papers With Code](https://paperswithcode.com/sota). The names -should be normalized to lowercase and use hyphens instead of spaces. - -| Task Name | Corresponding `label:tasks` | Description | -|-------------------------|-----------------------------|-----------------------------------------------------------------------------------------------------------------| -| `regression` | `regression` | Generic regression that estimates a numeric and continuous value. | -| `classification` | `classification` | Generic classification task that assigns class labels to an output. | -| `scene-classification` | *n/a* | Specific classification task where the model assigns a single class label to an entire scene/area. | -| `detection` | `detection` | Generic detection of the "presence" of objects or entities, with or without positions. | -| `object-detection` | *n/a* | Task corresponding to the identification of positions as bounding boxes of object detected in the scene. | -| `segmentation` | `segmentation` | Generic tasks that regroups all types of segmentations tasks consisting of applying labels to pixels. | -| `semantic-segmentation` | *n/a* | Specific segmentation task where all pixels are attributed labels, without consideration of similar instances. | -| `instance-segmentation` | *n/a* | Specific segmentation task that assigns distinct labels for groups of pixels corresponding to object instances. | -| `panoptic-segmentation` | *n/a* | Specific segmentation task that combines instance segmentation of objects and semantic labels for non-objects. | -| `similarity-search` | *n/a* | Generic task to identify whether a query input corresponds to another reference within a corpus. | -| `image-captioning` | *n/a* | Specific task of describing the content of an image in words. 
| -| `generative` | *n/a* | Generic task that encompasses all synthetic data generation techniques. | -| `super-resolution` | *n/a* | Specific task that increases the quality and resolution of an image by increasing its high-frequency details. | - -If the task falls within the category of supervised machine learning and uses labels during training, -this should align with the `label:tasks` values defined in [STAC Label Extension][stac-ext-label-props] for relevant -STAC Collections and Items published with the model described by this extension. - -It is to be noted that multiple "*generic*" tasks names (`classification`, `detection`, etc.) are defined to allow -correspondance with `label:tasks`, but these can lead to some ambiguity depending on context. For example, a model -that supports `classification` could mean that the model can predict patch-based classes over an entire scene -(i.e.: `scene-classification` for a single prediction over an entire area of interest as a whole), -or that it can predict pixel-wise "classifications", such as land-cover labels for -every single pixel coordinate over the area of interest. Maybe counter-intuitively to some users, -such a model that produces pixel-wise "classifications" should be attributed the `segmentation` task -(and more specifically `semantic-segmentation`) rather than `classification`. To avoid this kind of ambiguity, -it is strongly recommended that `tasks` always aim to provide the most specific definitions possible to explicitly -describe what the model accomplishes. 
- -[stac-ext-label-props]: https://github.com/stac-extensions/label#item-properties - #### Result Structure Object | Field Name | Type | Description | From 06ee0eff39ca7c77412e17ad1a2c1b3e2469ebe6 Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Fri, 29 Mar 2024 21:08:08 -0400 Subject: [PATCH 074/112] add details about link releation types --- README.md | 13 +++++++++---- best-practices.md | 20 ++++++++++++++++++++ 2 files changed, 29 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 12bf7d9..f3a92b3 100644 --- a/README.md +++ b/README.md @@ -401,11 +401,16 @@ See the documentation for the ## Relation types The following types should be used as applicable `rel` types in the -[Link Object](https://github.com/radiantearth/stac-spec/tree/master/item-spec/item-spec.md#link-object) of STAC Items describing Band Assets used with a model. +[Link Object](https://github.com/radiantearth/stac-spec/tree/master/item-spec/item-spec.md#link-object) +of STAC Items describing Band Assets that result from the inference of a model described by the MLM extension. -| Type | Description | -|--------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| derived_from | This link points to _item.json or _collection.json. Replace with the unique [`mlm:name`](#item-properties-and-collection-fields) field's value. | +| Type | Description | +|--------------|----------------------------------------------------------------------------------------------------------------------------------------------| +| derived_from | This link points to a STAC Collection or Item using MLM, using the corresponding [`mlm:name`](#item-properties-and-collection-fields) value. 
| + +Note that a derived product from model inference described by STAC should also consider using +additional indications that it came of a model, such as described by +the [Best Practices - Processing Extension](best-practices.md#processing-extension). ## Contributing diff --git a/best-practices.md b/best-practices.md index 691d7a9..c5c3e56 100644 --- a/best-practices.md +++ b/best-practices.md @@ -63,6 +63,7 @@ A potential representation of a STAC Asset could be as follows: ```json { "model-output": { + "mlm:name": "", + "mlm:name": " Date: Fri, 29 Mar 2024 22:14:23 -0400 Subject: [PATCH 075/112] add details about dimensions and tasks --- README.md | 37 ++++++++++++++++++++++--------------- 1 file changed, 22 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index f3a92b3..1839db5 100644 --- a/README.md +++ b/README.md @@ -366,24 +366,31 @@ the [Data Types from the STAC Raster extension][raster-data-types] should be use #### Input Structure Object -| Field Name | Type | Description | -|------------|-----------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| shape | [integer] | **REQUIRED** Shape of the input n-dimensional array ($N \times C \times H \times W$), including the batch size dimension. The batch size dimension must either be greater than 0 or -1 to indicate an unspecified batch dimension size. | -| dim_order | string | **REQUIRED** How the above dimensions are ordered within the `shape`. `bhw`, `bchw`, `bthw`, `btchw` are valid orderings where `b`=batch, `c`=channel, `t`=time, `h`=height, `w`=width. | -| data_type | [Data Type Enum](#data-type-enum) | **REQUIRED** The data type of values in the n-dimensional array. 
For model inputs, this should be the data type of the processed input supplied to the model inference function, not the data type of the source bands. | +| Field Name | Type | Description | +|------------|-----------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| shape | [integer] | **REQUIRED** Shape of the input n-dimensional array ($N \times C \times H \times W$), including the batch size dimension. Each dimension must either be greater than 0 or -1 to indicate a variable dimension size. | +| dim_order | string | **REQUIRED** How the above dimensions are ordered within the `shape`. `bhw`, `bchw`, `bthw`, `btchw` are valid orderings where `b`=batch, `c`=channel, `t`=time, `h`=height, `w`=width. | +| data_type | [Data Type Enum](#data-type-enum) | **REQUIRED** The data type of values in the n-dimensional array. For model inputs, this should be the data type of the processed input supplied to the model inference function, not the data type of the source bands. | -### Model Output Object - -| Field Name | Type | Description | -|--------------------------|-----------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| tasks | [[Task Enum](#task-enum)] | **REQUIRED** Specifies the Machine Learning tasks for which the output can be used for. This can be a subset of `mlm:tasks` defined under the Item `properties` as applicable. | -| result | [Result Structure Object](#result-structure-object) | The structure that describes the resulting output arrays/tensors from one model head. 
| -| classification:classes | [[Class Object](#class-object)] | A list of class objects adhering to the [Classification extension](https://github.com/stac-extensions/classification). | -| post_processing_function | string | A url to the postprocessing function where normalization, rescaling, and other operations take place.. Or, instead, the function code path, for example: `my_package.my_module.my_processing_function` | +A common use of `-1` for one dimension of `shape` is to indicate a variable batch-size. +However, this value is not strictly reserved for the `b` dimension. +For example, if the model is capable of automatically adjusting its input layer to adapt to the provided input data, +then the corresponding dimensions that can be adapted can employ `-1` as well. +### Model Output Object -While only `task` is a required field, all fields are recommended for supervised tasks that produce a fixed shape tensor and have output classes. -`image-captioning`, `multi-modal`, and `generative` tasks may not return fixed shape tensors or classes. +| Field Name | Type | Description | +|--------------------------|-----------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| tasks | [[Task Enum](#task-enum)] | **REQUIRED** Specifies the Machine Learning tasks for which the output can be used for. This can be a subset of `mlm:tasks` defined under the Item `properties` as applicable. | +| result | [Result Structure Object](#result-structure-object) | The structure that describes the resulting output arrays/tensors from one model head. | +| classification:classes | [[Class Object](#class-object)] | A list of class objects adhering to the [Classification Extension](https://github.com/stac-extensions/classification). 
| +| post_processing_function | string | A url to the postprocessing function where normalization, rescaling, and other operations take place.. Or, instead, the function code path, for example: `my_package.my_module:my_processing_function`. | + +While only `tasks` is a required field, all fields are recommended for tasks that produce a fixed +shape tensor and have output classes. Outputs that have variable dimensions, can define the `result` with the +appropriate dimension value `-1` in the `shape` field. When the model does not produce specific classes, such +as for `regression`, `image-captioning`, `super-resolution` and some `generative` tasks, to name a few, the +`classification:classes` can be omitted. #### Result Structure Object From 1faf4d9922746ac285a3ff5793c76a34a299aaf3 Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Tue, 2 Apr 2024 19:50:58 -0400 Subject: [PATCH 076/112] more examples and details --- README.md | 46 ++++++++++++++++++++++++++++++++++++---------- best-practices.md | 33 ++++++++++++++++++++++++++++----- 2 files changed, 64 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index 1839db5..783d028 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ - **Title:** Machine Learning Model Extension - **Identifier:** [https://schemas.stacspec.org/2.0.0.alpha.0/extensions/ml-model/json-schema/schema.json](https://schemas.stacspec.org/2.0.0.alpha.0/extensions/ml-model/json-schema/schema.json) - **Field Name Prefix:** mlm -- **Scope:** Item, Collection +- **Scope:** Collection, Item, Asset, Links - **Extension Maturity Classification:** Proposal - **Owner:** - [@fmigneault](https://github.com/fmigneault) @@ -19,7 +19,8 @@ trained on overhead imagery and enable running model inference. 
The main objectives of the extension are: 1) to enable building model collections that can be searched alongside associated STAC datasets -2) record all necessary bands, parameters, modeling artifact locations, and high-level processing steps to deploy an inference service. +2) record all necessary bands, parameters, modeling artifact locations, and high-level processing steps to deploy + an inference service. Specifically, this extension records the following information to make ML models searchable and reusable: 1. Sensor band specifications @@ -31,7 +32,8 @@ Specifically, this extension records the following information to make ML models The MLM specification is biased towards providing metadata fields for supervised machine learning models. However, fields that relate to supervised ML are optional and users can use the fields they need for different tasks. -See [Best Practices](./best-practices.md) for guidance on what other STAC extensions you should use in conjunction with this extension. +See [Best Practices](./best-practices.md) for guidance on what other STAC extensions you should use in conjunction +with this extension. The Machine Learning Model Extension purposely omits and delegates some definitions to other STAC extensions to favor reusability and avoid metadata duplication whenever possible. A properly defined MLM STAC Item/Collection should almost never have the Machine Learning Model Extension exclusively in `stac_extensions`. @@ -53,6 +55,14 @@ extension to synthesize common use cases into a single reference for Machine Lea ## Item Properties and Collection Fields +The fields in the table below can be used in these parts of STAC documents: + +- [ ] Catalogs +- [x] Collections +- [x] Item Properties (incl. Summaries in Collections) +- [x] Assets (for both Collections and Items, incl. 
Item Asset Definitions in Collections, except `mlm:name`) +- [ ] Links + | Field Name | Type | Description | |-----------------------------|--------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | mlm:name | string | **REQUIRED** A unique name for the model. This can include, but must be distinct, from simply naming the model architecture. If there is a publication or other published work related to the model, use the official name of the model. | @@ -60,7 +70,7 @@ extension to synthesize common use cases into a single reference for Machine Lea | mlm:tasks | [[Task Enum](#task-enum)] | **REQUIRED** Specifies the Machine Learning tasks for which the model can be used for. If multi-tasks outputs are provided by distinct model heads, specify all available tasks under the main properties and specify respective tasks in each [Model Output Object](#model-output-object). | | mlm:framework | string | **REQUIRED** Framework used to train the model (ex: PyTorch, TensorFlow). | | mlm:framework_version | string | The `framework` library version. Some models require a specific version of the machine learning `framework` to run. | -| mlm:memory_size | integer | **REQUIRED** The in-memory size of the model on the accelerator during inference (bytes). | +| mlm:memory_size | integer | The in-memory size of the model on the accelerator during inference (bytes). | | mlm:accelerator | [Accelerator Enum](#accelerator-enum) \| null | The intended computational hardware that runs inference. If undefined or set to `null` explicitly, the model does not require any specific accelerator. 
| | mlm:accelerator_constrained | boolean | Indicates if the intended `accelerator` is the only `accelerator` that can run inference. If undefined, it should be assumed `false`. | | mlm:accelerator_summary | string | A high level description of the `accelerator`, such as its specific generation, or other relevant inference details. | @@ -71,9 +81,22 @@ extension to synthesize common use cases into a single reference for Machine Lea | mlm:input | [[Model Input Object](#model-input-object)] | **REQUIRED** Describes the transformation between the EO data and the model input. | | mlm:output | [[Model Output Object](#model-output-object)] | **REQUIRED** Describes each model output and how to interpret it. | -In addition, fields from the following extensions must be imported in the item: -- [Scientific Extension Specification][stac-ext-sci] to describe relevant publications. -- [Version Extension Specification][stac-ext-ver] to define version tags. +To decide whether above fields should be applied under Item `properties` or under respective Assets, the context of +each field must be considered. For example, the `mlm:name` should always be provided in the Item `properties`, since +it relates to the model as a whole. In contrast, some models could support multiple `mlm:accelerator`, which could be +handled by distinct source code represented by different Assets. In such case, `mlm:accelerator` definitions should be +nested under their relevant Asset. If a field is defined both at the Item and Asset level, the value at the Asset level +would be considered for that specific Asset, and the value at the Item level would be used for other Assets that did +not override it for their respective reference. For some of the fields, further details are provided in following +sections to provide more precisions regarding some potentially ambiguous use cases. + +In addition, fields from the multiple relevant extensions should be defined as applicable. 
See +[Best Practices - Recommended Extensions to Compose with the ML Model Extension](best-practices.md#recommended-extensions-to-compose-with-the-ml-model-extension) +for more details. + +For the [Extent Object](https://github.com/radiantearth/stac-spec/blob/master/collection-spec/collection-spec.md#extent-object) +in STAC Collections and the corresponding spatial and temporal fields in Items, please refer to section +[Best Practices - Using STAC Common Metadata Fields for the ML Model Extension](best-practices.md#using-stac-common-metadata-fields-for-the-ml-model-extension). [stac-ext-sci]: https://github.com/radiantearth/stac-spec/tree/v1.0.0-beta.2/extensions/scientific/README.md [stac-ext-ver]: https://github.com/radiantearth/stac-spec/tree/v1.0.0-beta.2/extensions/version/README.md @@ -411,9 +434,12 @@ The following types should be used as applicable `rel` types in the [Link Object](https://github.com/radiantearth/stac-spec/tree/master/item-spec/item-spec.md#link-object) of STAC Items describing Band Assets that result from the inference of a model described by the MLM extension. -| Type | Description | -|--------------|----------------------------------------------------------------------------------------------------------------------------------------------| -| derived_from | This link points to a STAC Collection or Item using MLM, using the corresponding [`mlm:name`](#item-properties-and-collection-fields) value. | +| Type | Description | +|--------------|----------------------------------------------------------| +| derived_from | This link points to a STAC Collection or Item using MLM. | + +It is recommended that the link using `derived_from` referring to another STAC definition using the MLM extension +specifies the [`mlm:name`](#item-properties-and-collection-fields) value to make the derived reference more explicit. 
Note that a derived product from model inference described by STAC should also consider using additional indications that it came of a model, such as described by diff --git a/best-practices.md b/best-practices.md index c5c3e56..9c55c4f 100644 --- a/best-practices.md +++ b/best-practices.md @@ -18,12 +18,35 @@ models or creating tools to work with STAC. ## Using STAC Common Metadata Fields for the ML Model Extension -It is recommended to use the `start_datetime` and `end_datetime`, `geometry`, and `bbox` to represent the -recommended context of data the model was trained with and for which the model should have appropriate domain -knowledge for inference. For example, we can consider a model which is trained on imagery from all over the world +It is recommended to use the `start_datetime` and `end_datetime`, `geometry`, and `bbox` in a STAC Item, +and the corresponding +[Extent Object](https://github.com/radiantearth/stac-spec/blob/master/collection-spec/collection-spec.md#extent-object) +in a Collection, to represent the *recommended context* of the data the model was trained with and for which the model +should have appropriate domain knowledge for inference. + +For example, if a model was trained using the [EuroSAT][EuroSAT-github] dataset, and represented using MLM, it would +be reasonable to describe it with a time range of 2015-2018 and an area corresponding to the European Urban Atlas, as +described by the [EuroSAT paper][EuroSAT-paper]. However, it could also be considered adequate to define a wider extent, +considering that it would not be unexpected to have reasonably similar classes and domain distribution in following +years and in other locations. Provided that the exact extent applicable for a model is difficult to define reliably, +it is left to the good judgement of users to provide adequate values. Note that users employing the model can also +choose to apply it for contexts outside the *recommended* extent for the same reason. 
+ +[EuroSAT-github]: https://github.com/phelber/EuroSAT +[EuroSAT-paper]: https://www.researchgate.net/publication/319463676 + +As another example, let us consider a model which is trained on imagery from all over the world and is robust enough to be applied to any time period. In this case, the common metadata to use with the model -would include the bbox of "the world" `[-90, -180, 90, 180]` and the `start_datetime` and `end_datetime` range could -be generic values like `["1900-01-01", null]`. +could include the bbox of "the world" `[-180, -90, 180, 90]` and the `start_datetime` and `end_datetime` range could +be generic values like `["1900-01-01", null]`. However, it is to be noted that generic and very broad spatiotemporal +extents like these rarely reflect the reality regarding the capabilities and precision of the model to predict reliable +results. If a more restrained area and time of interest can be identified, such as the ranges for which the training +dataset applies, or a test split dataset that validates the applicability of the model on other domains, those should +be provided instead. + +If specific datasets with training/validation/test splits are known to support the claims of the suggested extent for +the model, it is recommended that they are included as reference to the STAC Item/Collection using MLM. For more +information regarding these references, see the [ML-AOI and Label Extensions](#ml-aoi-and-label-extensions) details. 
## Recommended Extensions to Compose with the ML Model Extension From 501971a750646ee4d06e9166f6929756a905f235 Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Tue, 2 Apr 2024 19:52:23 -0400 Subject: [PATCH 077/112] [wip] updating JSON-schema with MLM fields --- json-schema/schema.json | 138 ++++++++++++++++++++++++---------------- 1 file changed, 83 insertions(+), 55 deletions(-) diff --git a/json-schema/schema.json b/json-schema/schema.json index e50c214..6a2da45 100644 --- a/json-schema/schema.json +++ b/json-schema/schema.json @@ -1,50 +1,104 @@ { "$schema": "http://json-schema.org/draft-07/schema#", - "$id": "https://schemas.stacspec.org/v1.0.0-beta.3/extensions/dl-model/json-schema/schema.json", + "$id": "https://schemas.stacspec.org/v1.0.0/extensions/mlm/json-schema/schema.json", "title": "DL Model Item", - "description": "This object represents the metadata for a Deep Learning (DL) model item in a DL Catalog.", - "allOf": [ + "description": "This object represents the metadata for a Machine Learning Model (MLM).", + "oneOf": [ { - "$ref": "https://schemas.stacspec.org/v1.0.0/item-spec/json-schema/item.json" - }, - { - "$ref": "#/definitions/dl-model" + "$comment": "This is the schema for STAC extension MLM in Items.", + "allOf": [ + { + "type": "object", + "required": [ + "type", + "properties", + "assets" + ], + "properties": { + "type": { + "const": "Feature" + }, + "properties": { + "allOf": [ + { + "required": [ + "mlm:name", + "mlm:architecture", + "mlm:framework", + "mlm:tasks", + "mlm:input", + "mlm:output" + ] + }, + { + "$ref": "#/definitions/fields" + } + ] + }, + "assets": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/fields" + } + } + } + }, + { + "$ref": "#/definitions/stac_extensions_mlm" + } + ] }, { - "$ref": "#/definitions/dlm:properties" + "$comment": "This is the schema for STAC extension MLM in Collections.", + "allOf": [ + { + "type": "object", + "required": [ + "type", + "summaries" + ], + 
"properties": { + "type": { + "const": "Collection" + }, + "assets": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/fields" + } + } + } + } + ] } ], "definitions": { - "dl-model": { + "stac_extensions_mlm": { "type": "object", "required": [ - "stac_extensions", - "properties", - "assets" + "stac_extensions" ], "properties": { "stac_extensions": { - "type": "object", - "required": [ - "stac_extensions" - ], - "properties": { - "stac_extensions": { - "type": "array", - "contains": { - "enum": [ - "dl-model", - "https://schemas.stacspec.org/v1.0.0-beta.3/extensions/dl-model/json-schema/schema.json" - ] - } - } + "type": "array", + "contains": { + "const": "https://schemas.stacspec.org/v1.0.0/extensions/mlm/json-schema/schema.json" } - }, + } + } + }, + "fields": { + + + + }, + "properties": { "properties": { "type": "object", "required": [ - "dlm:inputs", - "dlm:outputs", + "mlm:name", + "mlm:input", + "mlm:output", "dlm:runtime", "dlm:archive", "dlm:data" @@ -529,31 +583,5 @@ } } } - }, - "dlm:properties": { - "type": "object", - "required": [ - "properties" - ], - "properties": { - "properties": { - "$comment": "Optional metadata that provides more details about provenance.", - "anyOf": [ - { - "$ref": "https://schemas.stacspec.org/v1.0.0-beta.2/item-spec/json-schema/instrument.json" - }, - { - "$ref": "https://schemas.stacspec.org/v1.0.0-beta.2/item-spec/json-schema/licensing.json" - }, - { - "$ref": "https://schemas.stacspec.org/v1.0.0-beta.2/item-spec/json-schema/provider.json" - }, - { - "$ref": "https://schemas.stacspec.org/v1.0.0-beta.2/item-spec/json-schema/datetime.json" - } - ] - } - } } - } } From 6ec1cd5d850208a338dfb052e61994b722223c4f Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Thu, 4 Apr 2024 01:04:50 -0400 Subject: [PATCH 078/112] [wip] more updates to JSON schema for MLM definitions --- CHANGELOG.md | 13 +- README.md | 331 ++++++++++++++-------- json-schema/schema.json | 605 
++++++++++++++++++++++++++++++++++------ stac_model/schema.py | 14 +- tests/test_schema.py | 2 + 5 files changed, 742 insertions(+), 223 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a205c8b..f191358 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,19 +18,26 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - add `scene-classification` to the Enum Tasks to allow disambiguation between pixel-wise and patch-based classification ### Changed -- reorganized `dlm:architecture` nested fields to exist at the top level of properties as `mlm:name`, `mlm:summary` and so on to provide STAC API search capabilities. +- reorganized `dlm:architecture` nested fields to exist at the top level of properties as `mlm:name`, `mlm:summary` + and so on to provide STAC API search capabilities. - replaced `normalization:mean`, etc. with [statistics](./README.md#bands-and-statistics) from STAC 1.1 common metadata - added `pydantic` models for internal schema objects in `stac_model` package and published to PYPI - specified [rel_type](./README.md#relation-types) to be `derived_from` and specify how model item or collection json should be named - replaced all Enum Tasks names to use hyphens instead of spaces - replaced `dlm:task` by `mlm:tasks` using an array of value instead of a single one, allowing models to represent multiple tasks they support simultaneously or interchangeably depending on context +- replace `pre_processing_function` and `post_processing_function` to use similar definitions + to the [Processing Extension - Expression Object](https://github.com/stac-extensions/processing#expression-object) + such that more extended definitions of custom processors can be defined. 
+- updated JSON schema to reflect changes of MLM fields ### Deprecated -- +- any `dlm`-prefixed field or property ### Removed -- Data Object, replaced with [Model Input Object](./README.md#model-input-object) that uses the `name` field from the [common metadata band object](https://github.com/radiantearth/stac-spec/blob/f9b3c59ba810541c9da70c5f8d39635f8cba7bcd/item-spec/common-metadata.md#bands) which also records `data_type` and `nodata` type +- Data Object, replaced with [Model Input Object](./README.md#model-input-object) that uses the `name` field from + the [common metadata band object](https://github.com/radiantearth/stac-spec/blob/f9b3c59ba810541c9da70c5f8d39635f8cba7bcd/item-spec/common-metadata.md#bands) + which also records `data_type` and `nodata` type # TODO link release here diff --git a/README.md b/README.md index 783d028..929a66a 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ [![hackmd-github-sync-badge](https://hackmd.io/lekSD_RVRiquNHRloXRzeg/badge)](https://hackmd.io/lekSD_RVRiquNHRloXRzeg?both) - **Title:** Machine Learning Model Extension -- **Identifier:** [https://schemas.stacspec.org/2.0.0.alpha.0/extensions/ml-model/json-schema/schema.json](https://schemas.stacspec.org/2.0.0.alpha.0/extensions/ml-model/json-schema/schema.json) +- **Identifier:** [https://stac-extensions.github.io/mlm/v1.0.0/schema.json](https://stac-extensions.github.io/mlm/v1.0.0/schema.json) - **Field Name Prefix:** mlm - **Scope:** Collection, Item, Asset, Links - **Extension Maturity Classification:** Proposal @@ -67,19 +67,19 @@ The fields in the table below can be used in these parts of STAC documents: 
|-----------------------------|--------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | mlm:name | string | **REQUIRED** A unique name for the model. This can include, but must be distinct, from simply naming the model architecture. If there is a publication or other published work related to the model, use the official name of the model. | | mlm:architecture | [Model Architecture](#model-architecture) string | **REQUIRED** A generic and well established architecture name of the model. | -| mlm:tasks | [[Task Enum](#task-enum)] | **REQUIRED** Specifies the Machine Learning tasks for which the model can be used for. If multi-tasks outputs are provided by distinct model heads, specify all available tasks under the main properties and specify respective tasks in each [Model Output Object](#model-output-object). | +| mlm:tasks | \[[Task Enum](#task-enum)] | **REQUIRED** Specifies the Machine Learning tasks for which the model can be used for. If multi-tasks outputs are provided by distinct model heads, specify all available tasks under the main properties and specify respective tasks in each [Model Output Object](#model-output-object). | | mlm:framework | string | **REQUIRED** Framework used to train the model (ex: PyTorch, TensorFlow). | | mlm:framework_version | string | The `framework` library version. Some models require a specific version of the machine learning `framework` to run. | | mlm:memory_size | integer | The in-memory size of the model on the accelerator during inference (bytes). | +| mlm:total_parameters | integer | Total number of model parameters, including trainable and non-trainable parameters. | +| mlm:pretrained_source | string \| null | The source of the pretraining. 
Can refer to popular pretraining datasets by name (i.e. Imagenet) or less known datasets by URL and description. If trained from scratch, the `null` value should be set explicitly. | +| mlm:batch_size_suggestion | integer | A suggested batch size for the accelerator and summarized hardware. | | mlm:accelerator | [Accelerator Enum](#accelerator-enum) \| null | The intended computational hardware that runs inference. If undefined or set to `null` explicitly, the model does not require any specific accelerator. | | mlm:accelerator_constrained | boolean | Indicates if the intended `accelerator` is the only `accelerator` that can run inference. If undefined, it should be assumed `false`. | | mlm:accelerator_summary | string | A high level description of the `accelerator`, such as its specific generation, or other relevant inference details. | | mlm:accelerator_count | integer | A minimum amount of `accelerator` instances required to run the model. | -| mlm:total_parameters | integer | Total number of model parameters, including trainable and non-trainable parameters. | -| mlm:pretrained_source | string \| null | The source of the pretraining. Can refer to popular pretraining datasets by name (i.e. Imagenet) or less known datasets by URL and description. If trained from scratch, the `null` value should be set explicitly. | -| mlm:batch_size_suggestion | number | A suggested batch size for the accelerator and summarized hardware. | -| mlm:input | [[Model Input Object](#model-input-object)] | **REQUIRED** Describes the transformation between the EO data and the model input. | -| mlm:output | [[Model Output Object](#model-output-object)] | **REQUIRED** Describes each model output and how to interpret it. | +| mlm:input | \[[Model Input Object](#model-input-object)] | **REQUIRED** Describes the transformation between the EO data and the model input. 
| +| mlm:output | \[[Model Output Object](#model-output-object)] | **REQUIRED** Describes each model output and how to interpret it. | To decide whether above fields should be applied under Item `properties` or under respective Assets, the context of each field must be considered. For example, the `mlm:name` should always be provided in the Item `properties`, since @@ -133,8 +133,8 @@ should be normalized to lowercase and use hyphens instead of spaces. | `instance-segmentation` | *n/a* | Specific segmentation task that assigns distinct labels for groups of pixels corresponding to object instances. | | `panoptic-segmentation` | *n/a* | Specific segmentation task that combines instance segmentation of objects and semantic labels for non-objects. | | `similarity-search` | *n/a* | Generic task to identify whether a query input corresponds to another reference within a corpus. | -| `image-captioning` | *n/a* | Specific task of describing the content of an image in words. | | `generative` | *n/a* | Generic task that encompasses all synthetic data generation techniques. | +| `image-captioning` | *n/a* | Specific task of describing the content of an image in words. | | `super-resolution` | *n/a* | Specific task that increases the quality and resolution of an image by increasing its high-frequency details. | If the task falls within the category of supervised machine learning and uses labels during training, @@ -154,6 +154,24 @@ describe what the model accomplishes. [stac-ext-label-props]: https://github.com/stac-extensions/label#item-properties +### Framework + +In most cases, this should correspond to common library names of well-established ML frameworks. +No explicit "Enum" is defined to allow easy addition of newer frameworks, but it is recommended +to use common names when applicable. Below are a few notable entries. 
+ +- `PyTorch` +- `TensorFlow` +- `Scikit-learn` +- `Huggingface` +- `PyMC` +- `JAX` +- `ONNX` +- `MXNet` +- `Keras` +- `Caffe` +- `Weka` + ### Accelerator Type Enum It is recommended to define `accelerator` with one of the following values: @@ -180,22 +198,177 @@ set to `true`, there would be no `accelerator` to contain against. To avoid conf ### Model Input Object -| Field Name | Type | Description | -|-------------------------|---------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| name | string | **REQUIRED** Name of the input variable defined by the model. If no explicit name is defined by the model, an informative name (e.g.: "RGB Time Series") can be used instead. | -| bands | [string] | **REQUIRED** The names of the raster bands used to train or fine-tune the model, which may be all or a subset of bands available in a STAC Item's [Band Object](#bands-and-statistics). | -| input | [Input Structure Object](#input-structure-object) | **REQUIRED** The N-dimensional array definition that describes the shape, dimension ordering, and data type. | -| norm_by_channel | boolean | Whether to normalize each channel by channel-wise statistics or to normalize by dataset statistics. If True, use an array of `statistics` of same dimensionality and order as the `bands` field in this object. | -| norm_type | string \| null | Normalization method. Select one option from `"min_max"`, `"z_score"`, `"max_norm"`, `"mean_norm"`, `"unit_variance"`, `"norm_with_clip"` or `null` when none applies. | -| resize_type | string \| null | High-level descriptor of the rescaling method to change image shape. Select one option from `"crop"`, `"pad"`, `"interpolation"` or `null` when none applies. 
If your rescaling method combines more than one of these operations, provide the name of the operation instead. | -| statistics | [[Statistics Object](#bands-and-statistics)] | Dataset statistics for the training dataset used to normalize the inputs. | -| norm_with_clip_values | [integer] | If `norm_type = "norm_with_clip"` this array supplies a value that is less than the band maximum. The array must be the same length as `bands`, each value is used to divide each band before clipping values between 0 and 1. | -| pre_processing_function | string \| null | URI to the preprocessing function where normalization and rescaling takes place, and any other significant operations or, instead, the function code path, for example: `my_python_module_name:my_processing_function`. | +| Field Name | Type | Description | +|-------------------------|---------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| name | string | **REQUIRED** Name of the input variable defined by the model. If no explicit name is defined by the model, an informative name (e.g.: "RGB Time Series") can be used instead. | +| bands | \[string] | **REQUIRED** The names of the raster bands used to train or fine-tune the model, which may be all or a subset of bands available in a STAC Item's [Band Object](#bands-and-statistics). If no band applies for one input, use an empty array. | +| input | [Input Structure Object](#input-structure-object) | **REQUIRED** The N-dimensional array definition that describes the shape, dimension ordering, and data type. | +| norm_by_channel | boolean | Whether to normalize each channel by channel-wise statistics or to normalize by dataset statistics. 
If True, use an array of `statistics` of same dimensionality and order as the `bands` field in this object. | +| norm_type | [Normalize Enum](#normalize-enum) \| null | Normalization method. Select an appropriate option or `null` when none applies. Consider using `pre_processing_function` for custom implementations or more complex combinations. | +| norm_clip | \[number] | When `norm_type = "clip"`, this array supplies the value for each `bands` item, which is used to divide each band before clipping values between 0 and 1. | +| resize_type | [Resize Enum](#resize-enum) \| null | High-level descriptor of the rescaling method to change image shape. Select an appropriate option or `null` when none applies. Consider using `pre_processing_function` for custom implementations or more complex combinations. | +| statistics | \[[Statistics Object](#bands-and-statistics)] | Dataset statistics for the training dataset used to normalize the inputs. | +| pre_processing_function | [Processing Expression](#processing-expression) \| null | Custom preprocessing function where normalization and rescaling, and any other significant operations takes place. | Fields that accept the `null` value can be considered `null` when omitted entirely for parsing purposes. However, setting `null` explicitly when this information is known by the model provider can help users understand what is the expected behavior of the model. It is therefore recommended to provide `null` explicitly when applicable. +#### Bands and Statistics + +Depending on the supported `stac_version` and other `stac_extensions` employed by the STAC Item using MLM, +the [STAC 1.1 - Band Object][stac-1.1-band], +the [STAC Raster - Band Object][stac-raster-band] or +the [STAC EO - Band Object][stac-eo-band] can be used for +representing bands information, including notably the `nodata` value, +the `data_type` (see also [Data Type Enum](#data-type-enum)), +and [Common Band Names][stac-band-names]. 
+ +Only bands used as input to the model should be included in the MLM `bands` field. +To avoid duplicating the information, MLM only uses the `name` of whichever "Band Object" is defined in the STAC Item. + +One distinction from the [STAC 1.1 - Band Object][stac-1.1-band] in MLM is that [Statistics][stac-1.1-stats] object +(or the corresponding [STAC Raster - Statistics][stac-raster-stats] for STAC 1.0) are not +defined at the "Band Object" level, but at the [Model Input](#model-input-object) level. +This is because, in machine learning, it is common to need overall statistics for the dataset used to train the model +to normalize all bands, rather than normalizing the values over a single product. Furthermore, statistics could be +applied differently for distinct [Model Input](#model-input-object) definitions, in order to adjust for intrinsic +properties of the model. + +[stac-1.1-band]: https://github.com/radiantearth/stac-spec/pull/1254 +[stac-1.1-stats]: https://github.com/radiantearth/stac-spec/blob/bands/item-spec/common-metadata.md#statistics-object +[stac-eo-band]: https://github.com/stac-extensions/eo?tab=readme-ov-file#band-object +[stac-raster-band]: https://github.com/stac-extensions/raster?tab=readme-ov-file#raster-band-object +[stac-raster-stats]: https://github.com/stac-extensions/raster?tab=readme-ov-file#statistics-object +[stac-band-names]: https://github.com/stac-extensions/eo?tab=readme-ov-file#common-band-names + +#### Data Type Enum + +When describing the `data_type` provided by a [Band](#bands-and-statistics), whether for defining +the [Input Structure](#input-structure-object) or the [Result Structure](#result-structure-object), +the [Data Types from the STAC Raster extension][raster-data-types] should be used if using STAC 1.0 or earlier, +and can use [Data Types from STAC 1.1 Core][stac-1.1-data-types] for later versions. +Both definitions should define equivalent values. 
+ +[raster-data-types]: https://github.com/stac-extensions/raster?tab=readme-ov-file#data-types +[stac-1.1-data-types]: https://github.com/radiantearth/stac-spec/blob/bands/item-spec/common-metadata.md#data-types + +#### Input Structure Object + +| Field Name | Type | Description | +|------------|-----------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| shape | [integer] | **REQUIRED** Shape of the input n-dimensional array ($N \times C \times H \times W$), including the batch size dimension. Each dimension must either be greater than 0 or -1 to indicate a variable dimension size. | +| dim_order | string | **REQUIRED** How the above dimensions are ordered within the `shape`. `bhw`, `bchw`, `bthw`, `btchw` are valid orderings where `b`=batch, `c`=channel, `t`=time, `h`=height, `w`=width. | +| data_type | [Data Type Enum](#data-type-enum) | **REQUIRED** The data type of values in the n-dimensional array. For model inputs, this should be the data type of the processed input supplied to the model inference function, not the data type of the source bands. | + +A common use of `-1` for one dimension of `shape` is to indicate a variable batch-size. +However, this value is not strictly reserved for the `b` dimension. +For example, if the model is capable of automatically adjusting its input layer to adapt to the provided input data, +then the corresponding dimensions that can be adapted can employ `-1` as well. + +#### Normalize Enum + +Select one option from: +- `min-max` +- `z-score` +- `l1` +- `l2` +- `l2sqr` +- `hamming` +- `hamming2` +- `type-mask` +- `relative` +- `inf` +- `clip` + +See [OpenCV - Normalization Flags](https://docs.opencv.org/4.x/d2/de8/group__core__array.html#ga87eef7ee3970f86906d69a92cbf064bd) +for details about the relevant methods. 
Equivalent methods from other packages are applicable as well. + +If none of the above values applies, `null` (literal, not string) can be used instead. +If a custom normalization operation, or a combination of operations (with or without [Resize](#resize-enum)), +must be defined instead, consider using a [Processing Expression](#processing-expression) reference. + +#### Resize Enum + +Select one option from: +- `crop` +- `pad` +- `interpolation-nearest` +- `interpolation-linear` +- `interpolation-cubic` +- `interpolation-area` +- `interpolation-lanczos4` +- `interpolation-max` +- `wrap-fill-outliers` +- `wrap-inverse-map` + +See [OpenCV - Interpolation Flags](https://docs.opencv.org/4.x/da/d54/group__imgproc__transform.html#ga5bb5a1fea74ea38e1a5445ca803ff121) +for details about the relevant methods. Equivalent methods from other packages are applicable as well. + +If none of the above values applies, `null` (literal, not string) can be used instead. +If a custom rescaling operation, or a combination of operations (with or without [Normalization](#normalize-enum)), +must be defined instead, consider using a [Processing Expression](#processing-expression) reference. + +#### Processing Expression + +Taking inspiration from [Processing Extension - Expression Object][stac-proc-expr], the processing expression defines +at the very least a `format` and the applicable `expression` for it to perform pre/post-processing operations on MLM +inputs/outputs. + +| Field Name | Type | Description | +| ---------- | ------ | ----------- | +| format | string | **REQUIRED** The type of the expression that is specified in the `expression` property. | +| expression | \* | **REQUIRED** An expression compliant with the `format` specified. The expression can be any data type and depends on the `format` given, e.g. string or object. 
| + +On top of the examples already provided by [Processing Extension - Expression Object][stac-proc-expr], +the following formats are recommended as alternative scripts and function references. + +| Format | Type | Description | Expression Example | +|----------| ------ |----------------------------------------|------------------------------------------------------------------------------------------------------| +| `python` | string | A Python entry point reference. | `my_package.my_module:my_processing_function` or `my_package.my_module:MyClass.my_method` | +| `docker` | string | An URI with image and tag to a Docker. | `ghcr.io/NAMESPACE/IMAGE_NAME:latest` | +| `uri` | string | An URI to some binary or script. | `{"href": "https://raw.githubusercontent.com/ORG/REPO/TAG/package/cli.py", "type": "text/x-python"}` | + +> [!NOTE] +> Above definitions are only indicative, and more can be added as desired with even more custom definitions. +> It is left as an implementation detail for users to resolve how these expressions should be handled at runtime. + +> [!WARNING] +> See also discussion regarding additional processing expressions: +> [stac-extensions/processing#31](https://github.com/stac-extensions/processing/issues/31) + + +[stac-proc-expr]: https://github.com/stac-extensions/processing#expression-object + +### Model Output Object + +| Field Name | Type | Description | +|--------------------------|-----------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| tasks | \[[Task Enum](#task-enum)] | **REQUIRED** Specifies the Machine Learning tasks for which the output can be used for. This can be a subset of `mlm:tasks` defined under the Item `properties` as applicable. 
| +| result | [Result Structure Object](#result-structure-object) | The structure that describes the resulting output arrays/tensors from one model head. | +| classification:classes | \[[Class Object](#class-object)] | A list of class objects adhering to the [Classification Extension](https://github.com/stac-extensions/classification). | +| post_processing_function | [Processing Expression](#processing-expression) \| null | Custom postprocessing function where normalization and rescaling, and any other significant operations takes place. | + +While only `tasks` is a required field, all fields are recommended for tasks that produce a fixed +shape tensor and have output classes. Outputs that have variable dimensions, can define the `result` with the +appropriate dimension value `-1` in the `shape` field. When the model does not produce specific classes, such +as for `regression`, `image-captioning`, `super-resolution` and some `generative` tasks, to name a few, the +`classification:classes` can be omitted. + +#### Result Structure Object + +| Field Name | Type | Description | +|------------|-----------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| shape | \[integer] | **REQUIRED** Shape of the n-dimensional result array ($N \times H \times W$), possibly including a batch size dimension. The batch size dimension must either be greater than 0 or -1 to indicate an unspecified batch dimension size. | +| dim_names | \[string] | **REQUIRED** The names of the above dimensions of the result array, ordered the same as this object's `shape` field. | +| data_type | [Data Type Enum](#data-type-enum) | **REQUIRED** The data type of values in the n-dimensional array. 
For model outputs, this should be the data type of the result of the model inference without extra post processing. | + +#### Class Object + +See the documentation for the +[Class Object](https://github.com/stac-extensions/classification?tab=readme-ov-file#class-object). + + ## Assets Objects | Field Name | Type | Description | @@ -242,7 +415,6 @@ In order to provide more context, the following roles are also recommended were [ml-model-asset-roles]: https://github.com/stac-extensions/ml-model?tab=readme-ov-file#asset-objects - ### Model Asset | Field Name | Type | Description | @@ -250,7 +422,7 @@ In order to provide more context, the following roles are also recommended were | title | string | Description of the model asset. | | href | string | URI to the model artifact. | | type | string | The media type of the artifact (see [Model Artifact Media-Type](#model-artifact-media-type). | -| roles | [string] | **REQUIRED** Specify `mlm:model`. Can include `["mlm:weights", "mlm:checkpoint"]` as applicable. | +| roles | \[string] | **REQUIRED** Specify `mlm:model`. Can include `["mlm:weights", "mlm:checkpoint"]` as applicable. | | mlm:artifact_type | [Artifact Type Enum](#artifact-type-enum) | Specifies the kind of model artifact. Typically related to a particular ML framework. | Recommended Asset `roles` include `mlm:weights` or `mlm:checkpoint` for model weights that need to be loaded by a @@ -288,14 +460,14 @@ the users understand the source explicitly, although this is not strictly requir ### Source Code Asset -| Field Name | Type | Description | -|----------------|----------|-------------------------------------------------------------------------------| -| title | string | Title of the source code. | -| href | string | URI to the code repository, a ZIP archive, or an individual code/script file. | -| type | string | Media-type of the URI. 
| -| roles | [string] | **RECOMMENDED** Specify one or more of `["model", "code", "metadata"]` | -| description | string | Description of the source code. | -| mlm:entrypoint | string | Specific entrypoint reference in the code to use for running model inference. | +| Field Name | Type | Description | +|----------------|-----------|-------------------------------------------------------------------------------| +| title | string | Title of the source code. | +| href | string | URI to the code repository, a ZIP archive, or an individual code/script file. | +| type | string | Media-type of the URI. | +| roles | \[string] | **RECOMMENDED** Specify one or more of `["model", "code", "metadata"]` | +| description | string | Description of the source code. | +| mlm:entrypoint | string | Specific entrypoint reference in the code to use for running model inference. | If the referenced code does not directly offer a callable script to run the model, the `mlm:entrypoint` field should be added to the [Asset Object][stac-asset] in order to provide a pointer to the inference function to execute the model. @@ -319,24 +491,23 @@ since the source code asset might also refer to more detailed metadata than this ### Container Asset -| Field Name | Type | Description | -|-------------|----------|-----------------------------------------------------------------------------------| -| title | string | Description of the container. | -| href | string | URI of the published container, including the container registry, image and tag. | -| type | string | Media-type of the container, typically `application/vnd.oci.image.index.v1+json`. | -| roles | [string] | Specify `["runtime"]` and any other custom roles. | +| Field Name | Type | Description | +|-------------|-----------|-----------------------------------------------------------------------------------| +| title | string | Description of the container. 
| +| href | string | URI of the published container, including the container registry, image and tag. | +| type | string | Media-type of the container, typically `application/vnd.oci.image.index.v1+json`. | +| roles | \[string] | Specify `["runtime"]` and any other custom roles. | If you're unsure how to containerize your model, we suggest starting from the latest official container image for your framework that works with your model and pinning the container version. Examples: -[Pytorch Dockerhub](https://hub.docker.com/r/pytorch/pytorch/tags) -[Pytorch Docker Run Example](https://github.com/pytorch/pytorch?tab=readme-ov-file#docker-image) +- [Pytorch Dockerhub](https://hub.docker.com/r/pytorch/pytorch/tags) +- [Pytorch Docker Run Example](https://github.com/pytorch/pytorch?tab=readme-ov-file#docker-image) +- [Tensorflow Dockerhub](https://hub.docker.com/r/tensorflow/tensorflow/tags?page=8&ordering=last_updated) +- [Tensorflow Docker Run Example](https://www.tensorflow.org/install/docker#gpu_support) -[Tensorflow Dockerhub](https://hub.docker.com/r/tensorflow/tensorflow/tags?page=8&ordering=last_updated) -[Tensorflow Docker Run Example](https://www.tensorflow.org/install/docker#gpu_support) - -Using a base image for a framework looks like +Using a base image for a framework looks like: ```dockerfile # In your Dockerfile, pull the latest base image with all framework dependencies including accelerator drivers @@ -350,84 +521,6 @@ You can also use other base images. 
Pytorch and Tensorflow offer docker images f - [Torchserve](https://pytorch.org/serve/) - [TFServing](https://github.com/tensorflow/serving) - -#### Bands and Statistics - -Depending on the supported `stac_version` and other `stac_extensions` employed by the STAC Item using MLM, -the [STAC 1.1 - Band Object][stac-1.1-band], -the [STAC Raster - Band Object][stac-raster-band] or -the [STAC EO - Band Object][stac-eo-band] can be used for -representing bands information, including notably the `nodata` value, -the `data_type` (see also [Data Type Enum](#data-type-enum)), -and [Common Band Names][stac-band-names]. - -Only bands used as input to the model should be included in the MLM `bands` field. -To avoid duplicating the information, MLM only uses the `name` of whichever "Band Object" is defined in the STAC Item. - -One distinction from the [STAC 1.1 - Band Object][stac-1.1-band] in MLM is that [Statistics][stac-1.1-stats] object -(or the corresponding [STAC Raster - Statistics][stac-raster-stats] for STAC 1.0) are not -defined at the "Band Object" level, but at the [Model Input](#model-input-object) level. -This is because, in machine learning, it is common to need overall statistics for the dataset used to train the model -to normalize all bands, rather than normalizing the values over a single product. Furthermore, statistics could be -applied differently for distinct [Model Input](#model-input-object) definitions, in order to adjust for intrinsic -properties of the model. 
- -[stac-1.1-band]: https://github.com/radiantearth/stac-spec/pull/1254 -[stac-1.1-stats]: https://github.com/radiantearth/stac-spec/pull/1254/files#diff-2477b726f8c5d5d1c8b391be056db325e6918e78a24b414ccd757c7fbd574079R294 -[stac-eo-band]: https://github.com/stac-extensions/eo?tab=readme-ov-file#band-object -[stac-raster-band]: https://github.com/stac-extensions/raster?tab=readme-ov-file#raster-band-object -[stac-raster-stats]: https://github.com/stac-extensions/raster?tab=readme-ov-file#statistics-object -[stac-band-names]: https://github.com/stac-extensions/eo?tab=readme-ov-file#common-band-names - -#### Data Type Enum - -When describing the `data_type` provided by a [Band](#bands-and-statistics), whether for defining -the [Input Structure](#input-structure-object) or the [Result Structure](#result-structure-object), -the [Data Types from the STAC Raster extension][raster-data-types] should be used. - -[raster-data-types]: https://github.com/stac-extensions/raster?tab=readme-ov-file#data-types - -#### Input Structure Object - -| Field Name | Type | Description | -|------------|-----------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| shape | [integer] | **REQUIRED** Shape of the input n-dimensional array ($N \times C \times H \times W$), including the batch size dimension. Each dimension must either be greater than 0 or -1 to indicate a variable dimension size. | -| dim_order | string | **REQUIRED** How the above dimensions are ordered within the `shape`. `bhw`, `bchw`, `bthw`, `btchw` are valid orderings where `b`=batch, `c`=channel, `t`=time, `h`=height, `w`=width. | -| data_type | [Data Type Enum](#data-type-enum) | **REQUIRED** The data type of values in the n-dimensional array. 
For model inputs, this should be the data type of the processed input supplied to the model inference function, not the data type of the source bands. | - -A common use of `-1` for one dimension of `shape` is to indicate a variable batch-size. -However, this value is not strictly reserved for the `b` dimension. -For example, if the model is capable of automatically adjusting its input layer to adapt to the provided input data, -then the corresponding dimensions that can be adapted can employ `-1` as well. - -### Model Output Object - -| Field Name | Type | Description | -|--------------------------|-----------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| tasks | [[Task Enum](#task-enum)] | **REQUIRED** Specifies the Machine Learning tasks for which the output can be used for. This can be a subset of `mlm:tasks` defined under the Item `properties` as applicable. | -| result | [Result Structure Object](#result-structure-object) | The structure that describes the resulting output arrays/tensors from one model head. | -| classification:classes | [[Class Object](#class-object)] | A list of class objects adhering to the [Classification Extension](https://github.com/stac-extensions/classification). | -| post_processing_function | string | A url to the postprocessing function where normalization, rescaling, and other operations take place.. Or, instead, the function code path, for example: `my_package.my_module:my_processing_function`. | - -While only `tasks` is a required field, all fields are recommended for tasks that produce a fixed -shape tensor and have output classes. Outputs that have variable dimensions, can define the `result` with the -appropriate dimension value `-1` in the `shape` field. 
When the model does not produce specific classes, such -as for `regression`, `image-captioning`, `super-resolution` and some `generative` tasks, to name a few, the -`classification:classes` can be omitted. - -#### Result Structure Object - -| Field Name | Type | Description | -|------------|-----------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| shape | [integer] | **REQUIRED** Shape of the n-dimensional result array ($N \times H \times W$), possibly including a batch size dimension. The batch size dimension must either be greater than 0 or -1 to indicate an unspecified batch dimension size. | -| dim_names | [string] | **REQUIRED** The names of the above dimensions of the result array, ordered the same as this object's `shape` field. | -| data_type | [Data Type Enum](#data-type-enum) | **REQUIRED** The data type of values in the n-dimensional array. For model outputs, this should be the data type of the result of the model inference without extra post processing. | - -#### Class Object - -See the documentation for the -[Class Object](https://github.com/stac-extensions/classification?tab=readme-ov-file#class-object). 
- ## Relation types The following types should be used as applicable `rel` types in the diff --git a/json-schema/schema.json b/json-schema/schema.json index 6a2da45..487d218 100644 --- a/json-schema/schema.json +++ b/json-schema/schema.json @@ -1,6 +1,6 @@ { "$schema": "http://json-schema.org/draft-07/schema#", - "$id": "https://schemas.stacspec.org/v1.0.0/extensions/mlm/json-schema/schema.json", + "$id": "https://stac-extensions.github.io/mlm/v1.0.0/schema.json", "title": "DL Model Item", "description": "This object represents the metadata for a Machine Learning Model (MLM).", "oneOf": [ @@ -31,20 +31,20 @@ ] }, { - "$ref": "#/definitions/fields" + "$ref": "#/$defs/fields" } ] }, "assets": { "type": "object", "additionalProperties": { - "$ref": "#/definitions/fields" + "$ref": "#/$defs/fields" } } } }, { - "$ref": "#/definitions/stac_extensions_mlm" + "$ref": "#/$defs/stac_extensions_mlm" } ] }, @@ -54,25 +54,33 @@ { "type": "object", "required": [ - "type", - "summaries" + "type" ], "properties": { "type": { "const": "Collection" }, + "summaries": { + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/fields" + } + }, "assets": { "type": "object", "additionalProperties": { - "$ref": "#/definitions/fields" + "$ref": "#/$defs/fields" } } } + }, + { + "$ref": "#/$defs/stac_extensions_mlm" } ] } ], - "definitions": { + "$defs": { "stac_extensions_mlm": { "type": "object", "required": [ @@ -82,54 +90,497 @@ "stac_extensions": { "type": "array", "contains": { - "const": "https://schemas.stacspec.org/v1.0.0/extensions/mlm/json-schema/schema.json" + "const": "https://stac-extensions.github.io/mlm/v1.0.0/schema.json" } } } }, - "fields": { - - - + "stac_extensions_eo": { + "type": "object", + "required": [ + "stac_extensions" + ], + "properties": { + "stac_extensions": { + "type": "array", + "contains": { + "type": "string", + "pattern": "https://stac-extensions\\.github\\.io/eo/v1(\\.[0-9]+){2}/schema\\.json" + } + } + } }, + "stac_extensions_raster": { + 
"type": "object", + "required": [ + "stac_extensions" + ], "properties": { - "properties": { - "type": "object", - "required": [ - "mlm:name", - "mlm:input", - "mlm:output", - "dlm:runtime", - "dlm:archive", - "dlm:data" - ], - "properties": { - "dlm:inputs": { - "$ref": "#/definitions/dlm:inputs" - }, - "dlm:outputs": { - "$ref": "#/definitions/dlm:outputs" - }, - "dlm:runtime": { - "$ref": "#/definitions/dlm:runtime" - }, - "dlm:architecture": { - "$ref": "#/definitions/dlm:architecture" - }, - "dlm:archive": { - "$ref": "#/definitions/dlm:archive" - }, - "dlm:data": { - "$ref": "#/definitions/dlm:data" - } + "stac_extensions": { + "type": "array", + "contains": { + "type": "string", + "pattern": "https://stac-extensions\\.github\\.io/raster/v1(\\.[0-9]+){2}/schema\\.json" } } + } + }, + "stac_version_1.1": { + "$comment": "Requirement for STAC 1.1 or above.", + "type": "object", + "required": [ + "stac_version" + ], + "properties": { + "stac_version": { + "pattern": "1\\.[1-9][0-9]*\\.[0-9]+(-.*)?" 
+ } + } + }, + "fields": { + "type": "object", + "properties": { + "mlm:name": { + "$ref": "#/$defs/mlm:name" + }, + "mlm:architecture": { + "$ref": "#/$defs/mlm:architecture" + }, + "mlm:tasks": { + "$ref": "#/$defs/mlm:tasks" + }, + "mlm:framework": { + "$ref": "#/$defs/mlm:framework" + }, + "mlm:framework_version": { + "$ref": "#/$defs/mlm:framework_version" + }, + "mlm:memory_size": { + "$ref": "#/$defs/mlm:memory_size" + }, + "mlm:total_parameters": { + "$ref": "#/$defs/mlm:total_parameters" + }, + "mlm:pretrained_source": { + "$ref": "#/$defs/mlm:pretrained_source" + }, + "mlm:batch_size_suggestion": { + "$ref": "#/$defs/mlm:batch_size_suggestion" + }, + "mlm:accelerator": { + "$ref": "#/$defs/mlm:accelerator" + }, + "mlm:accelerator_constrained": { + "$ref": "#/$defs/mlm:accelerator_constrained" + }, + "mlm:accelerator_summary": { + "$ref": "#/$defs/mlm:accelerator_summary" + }, + "mlm:accelerator_count": { + "$ref": "#/$defs/mlm:accelerator_count" + }, + "mlm:input": { + "$ref": "#/$defs/mlm:input" + }, + "mlm:output": { + "$ref": "#/$defs/mlm:output" + } }, + "$comment": "Allow properties not defined by MLM prefix to allow combination with other extensions.", "patternProperties": { "^(?!dlm:)": {} }, "additionalProperties": false }, + "mlm:name": { + "type": "string", + "pattern": "^[a-zA-Z][a-zA-Z0-9_.-]+[a-zA-Z0-9]$" + }, + "mlm:architecture": { + "type": "string", + "title": "Model Architecture", + "description": "A descriptive name of the model architecture, typically a common name from the literature.", + "examples": [ + "ResNet", + "VGG", + "GAN", + "Vision Transformer" + ] + }, + "mlm:framework": { + "title": "Name of the machine learning framework used.", + "anyOf": [ + { + "$comment": "Add more entries here as needed, and repeat them in the README.", + "description": "Notable predefined framework names.", + "type": "string", + "enum": [ + "PyTorch", + "TensorFlow", + "Scikit-learn", + "Huggingface", + "PyMC", + "JAX", + "ONNX", + "MXNet", + 
"Keras", + "Caffe", + "Weka" + ] + }, + { + "type": "string", + "minLength": 1, + "description": "Any other framework name to allow extension. Enum names should be preferred when possible to allow better portability." + } + ] + }, + "mlm:framework_version": { + "title": "Framework version", + "type": "string", + "pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$" + }, + "mlm:tasks": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "regression", + "classification", + "scene-classification", + "detection", + "object-detection", + "segmentation", + "semantic-segmentation", + "instance-segmentation", + "panoptic-segmentation", + "similarity-search", + "generative", + "image-captioning", + "super-resolution" + ] + } + }, + "mlm:memory_size": { + "description": "Memory size (in bytes) required to load the model with the specified accelerator.", + "type": "integer", + "minimum": 0 + }, + "mlm:total_parameters": { + "description": "Total number of model parameters (weights).", + "type": "integer", + "minimum": 0 + }, + "mlm:pretrained_source": { + "description": "Pre-training dataset reference or training from scratch definition.", + "oneOf": [ + { + "type": "string", + "description": "The name or URI of the dataset used for pretraining the model.", + "examples": [ + "ImageNet", + "EuroSAT" + ] + }, + { + "type": "null", + "description": "Explicit mention that the model is trained from scratch." 
+ } + ] + }, + "mlm:batch_size_suggestion": { + "description": "Recommended batch size to employ the model with the accelerator.", + "type": "integer", + "minimum": 0 + }, + "mlm:accelerator": { + "oneOf": [ + { + "type": "string", + "enum": [ + "amd64", + "cuda", + "xla", + "amd-rocm", + "intel-ipex-cpu", + "intel-ipex-gpu", + "macos-arm" + ] + }, + { + "type": "null" + } + ], + "default": null + }, + "mlm:accelerator_constrained": { + "type": "boolean", + "default": false + }, + "mlm:accelerator_summary": { + "type": "string" + }, + "mlm:accelerator_count": { + "type": "integer", + "minimum": 1 + }, + "mlm:input": { + "type": "array", + "items": { + "title": "Model Input Object", + "type": "object", + "required": [ + "name", + "bands", + "input" + ], + "properties": { + "name": { + "type": "string", + "minLength": 1 + }, + "bands": { + "$ref": "#/$defs/ModelBands" + }, + "input": { + "$ref": "#/$defs/InputStructure" + }, + "norm_by_channel": { + "type": "boolean" + }, + "norm_type": { + "$ref": "#/$defs/NormalizeType" + }, + "norm_clip": { + "$ref": "#/$defs/NormalizeClip" + }, + "resize_type": { + "$ref": "#/$defs/ResizeType" + }, + "statistics": { + "$ref": "#/$defs/statistics" + }, + "pre_processing_function": { + "$ref": "https://stac-extensions.github.io/processing/v1.1.0/schema.json#/definitions/fields/properties/processing:expression" + } + } + } + }, + "InputStructure": { + "title": "Input Structure Object", + "type": "object", + "required": [ + "shape", + "dim_order", + "data_type" + ], + "properties": { + "shape": { + "type": "array", + "items": { + "type": "integer", + "minimum": -1 + } + }, + "dim_order": { + "type": "string", + "minLength": 1 + }, + "data_type": { + "$ref": "#/$defs/DataType" + } + } + }, + "NormalizeType": { + "oneOf": [ + { + "type": "string", + "enum": [ + "min-max", + "z-score", + "l1", + "l2", + "l2sqr", + "hamming", + "hamming2", + "type-mask", + "relative", + "inf" + ] + }, + { + "type": "null" + } + ] + }, + "NormalizeClip": 
{ + + }, + "ResizeType": { + "oneOf": [ + { + "type": "string", + "enum": [ + "crop", + "pad", + "interpolation-nearest", + "interpolation-linear", + "interpolation-cubic", + "interpolation-area", + "interpolation-lanczos4", + "interpolation-max", + "wrap-fill-outliers", + "wrap-inverse-map" + ] + }, + { + "type": "null" + } + ] + }, + "DataType": { + "$ref": "https://stac-extensions.github.io/raster/v1.1.0/schema.json#/definitions/bands/items/properties/data_type" + }, + "ModelBands": { + "allOf": [ + { + "$comment": "No 'minItems' here since to support model inputs not using any band (other data source).", + "type": "array", + "items": { + "type": "string", + "minLength": 1 + } + }, + { + "$comment": "However, if any band is indicated, a 'bands'-compliant section should describe them.", + "$ref": "#/$defs/AnyBandsRef" + } + ] + }, + "AnyBandsRef": { + "$comment": "This definition ensures that, if at least 1 named MLM 'bands' is provided, at least 1 of the supported references from EO, Raster or STAC Core 1.1 are provided as well.", + "if": { + "$comment": "This is the JSON-object 'properties' definition.", + "properties": { + "$comment": "This is the STAC-Item 'properties' field.", + "properties": { + "required": [ + "mlm:input" + ], + "$comment": "This is the JSON-object 'properties' definition for the STAC Item 'properties' field.", + "properties": { + "$comment": "Required MLM bands listing referring to at least one band name.", + "mlm:input": { + "type": "array", + "items": { + "required": [ + "bands" + ], + "$comment": "This is the 'Model Input Object' properties.", + "properties": { + "bands": { + "type": "array", + "items": { + "type": "string", + "$comment": "This 'minItems' is the purpose of this whole 'if/then' block.", + "minItems": 1 + } + } + } + } + } + } + } + } + }, + "then": { + "$comment": "Need at least one 'bands', but multiple is allowed.", + "anyOf": [ + { + "allOf": [ + { + "$ref": "#/$defs/stac_extensions_raster" + }, + { + "$comment": 
"This is the JSON-object 'properties' definition.", + "properties": { + "$comment": "This is the STAC-Item 'properties' field.", + "properties": { + "required": ["raster:bands"], + "$comment": "This is the JSON-object 'properties' definition for the STAC Item 'properties' field.", + "properties": { + "$comment": "https://github.com/stac-extensions/raster#item-asset-fields", + "raster:bands": { + "type": "array", + "minItems": 1, + "items": { + "type": "object" + } + } + } + } + } + } + ] + }, + { + "allOf": [ + { + "$ref": "#/$defs/stac_extensions_eo" + }, + { + "$comment": "This is the JSON-object 'properties' definition.", + "properties": { + "$comment": "This is the STAC-Item 'properties' field.", + "properties": { + "required": ["eo:bands"], + "$comment": "This is the JSON-object 'properties' definition for the STAC Item 'properties' field.", + "properties": { + "$comment": "https://github.com/stac-extensions/eo#item-properties-or-asset-fields", + "eo:bands": { + "type": "array", + "minItems": 1, + "items": { + "type": "object" + } + } + } + } + } + } + ] + }, + { + "allOf": [ + { + "$ref": "#/$defs/stac_version_1.1" + }, + { + "$comment": "This is the JSON-object 'properties' definition.", + "properties": { + "$comment": "This is the STAC-Item 'properties' field.", + "properties": { + "required": ["bands"], + "$comment": "This is the JSON-object 'properties' definition for the STAC Item 'properties' field.", + "properties": { + "$comment": "https://github.com/radiantearth/stac-spec/blob/bands/item-spec/common-metadata.md#bands", + "bands": { + "type": "array", + "minItems": 1, + "items": { + "type": "object" + } + } + } + } + } + } + ] + } + ] + } + }, + "dlm:runtime": { "title": "Execution environment", "description": "Describe the execution environment", @@ -142,30 +593,7 @@ "requirement_file" ], "properties": { - "framework": { - "title": "Name of the deep learning framework used", - "type": "string" - }, - "framework_version": { - "title": "Framework 
version", - "type": "string" - }, - "model_handler": { - "title": "Model handling function", - "type": "string" - }, - "model_src_url": { - "title": "Model source repository", - "type": "string" - }, - "model_commit_hash": { - "title": "Hash value for the Model source", - "type": "string" - }, - "requirement_file": { - "title": "Requirement file", - "type": "string" - }, + "docker": { "title": "Docker runtime specifications", "type": "object", @@ -399,10 +827,10 @@ "description": "This is a lookup table mapping the model output (index) to a class name", "oneOf": [ { - "$ref": "#/definitions/dlm:class_name_listing" + "$ref": "#/$defs/dlm:class_name_listing" }, { - "$ref": "#/definitions/dlm:class_name_mapping" + "$ref": "#/$defs/dlm:class_name_mapping" } ] } @@ -413,26 +841,26 @@ "type": "array", "minItems": 1, "items": { - "oneOf": [ - { - "type": "object", - "properties": { - "index": { - "title": "Class index", - "type": "integer", - "minimum": 0 - }, - "class_name": { - "title": "Class name", - "type": "string" - } - } - }, - { - "$ref": "#/definitions/dlm:class_name_mapping" - } - ] - } + "oneOf": [ + { + "type": "object", + "properties": { + "index": { + "title": "Class index", + "type": "integer", + "minimum": 0 + }, + "class_name": { + "title": "Class name", + "type": "string" + } + } + }, + { + "$ref": "#/$defs/dlm:class_name_mapping" + } + ] + } }, "dlm:class_name_mapping": { "type": "object", @@ -584,4 +1012,5 @@ } } } + } } diff --git a/stac_model/schema.py b/stac_model/schema.py index 03b7c50..4f41603 100644 --- a/stac_model/schema.py +++ b/stac_model/schema.py @@ -33,8 +33,7 @@ ) SchemaName = Literal["mlm"] -# TODO update -SCHEMA_URI: str = "https://raw.githubusercontent.com/crim-ca/dlm-extension/main/json-schema/schema.json" # noqa: E501 +SCHEMA_URI: str = "https://stac-extensions.github.io/mlm/v1.0.0/schema.json" PREFIX = f"{get_args(SchemaName)[0]}:" @@ -90,17 +89,6 @@ def apply( def get_schema_uri(cls) -> str: return SCHEMA_URI - @classmethod - def 
has_extension(cls, obj: S): - # FIXME: this override should be removed once an official and - # versioned schema is released ignore the original implementation - # logic for a version regex since in our case, the VERSION_REGEX - # is not fulfilled (ie: using 'main' branch, no tag available...) - ext_uri = cls.get_schema_uri() - return obj.stac_extensions is not None and any( - uri == ext_uri for uri in obj.stac_extensions - ) - @classmethod def ext(cls, obj: T, add_if_missing: bool = False) -> "MLModelExtension[T]": """Extends the given STAC Object with properties from the diff --git a/tests/test_schema.py b/tests/test_schema.py index 20154f8..45d6e36 100644 --- a/tests/test_schema.py +++ b/tests/test_schema.py @@ -8,9 +8,11 @@ def mlmodel_metadata_item(): model_metadata_stac_item = eurosat_resnet() return model_metadata_stac_item + def test_model_metadata_to_dict(mlmodel_metadata_item): assert mlmodel_metadata_item.item.to_dict() + def test_validate_model_metadata(mlmodel_metadata_item): import pystac assert pystac.read_dict(mlmodel_metadata_item.item.to_dict()) From 8aca9b38ca0a0be2e04bac701f3d213fa5a29624 Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Thu, 4 Apr 2024 13:14:52 -0400 Subject: [PATCH 079/112] more schema adjustments --- .gitignore | 1 + examples/example.json | 497 ++++++++++++++++++---------------- json-schema/schema.json | 586 ++++++++++------------------------------ poetry.lock | 340 +++++++++++------------ pyproject.toml | 2 +- stac_model/examples.py | 10 +- stac_model/input.py | 2 +- stac_model/output.py | 4 +- tests/conftest.py | 50 ++++ tests/test_schema.py | 21 +- 10 files changed, 654 insertions(+), 859 deletions(-) create mode 100644 tests/conftest.py diff --git a/.gitignore b/.gitignore index a556189..3204cbe 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,7 @@ Untitled.ipynb /package-lock.json /node_modules .vscode +.idea ### ArchLinuxPackages ### *.tar diff --git a/examples/example.json b/examples/example.json index 
21672ed..ba10bba 100644 --- a/examples/example.json +++ b/examples/example.json @@ -1,236 +1,275 @@ { - "type": "Feature", - "stac_version": "1.0.0", - "id": "resnet-18_sentinel-2_all_moco_classification", - "properties": { - "start_datetime": "1900-01-01", - "end_datetime": null, - "mlm:name": "Resnet-18 Sentinel-2 ALL MOCO", - "mlm:task": "classification", - "mlm:framework": "pytorch", - "mlm:framework_version": "2.1.2+cu121", - "mlm:file_size": 43000000, - "mlm:memory_size": 1, - "mlm:input": [ - { - "name": "13 Band Sentinel-2 Batch", - "bands": [ - "B01", - "B02", - "B03", - "B04", - "B05", - "B06", - "B07", - "B08", - "B8A", - "B09", - "B10", - "B11", - "B12" - ], - "input_array": { - "shape": [ - -1, - 13, - 64, - 64 - ], - "dim_order": "bchw", - "data_type": "float32" - }, - "norm_by_channel": true, - "norm_type": "z_score", - "resize_type": "none", - "parameters": null, - "statistics": { - "minimum": null, - "maximum": null, - "mean": [ - 1354.40546513, - 1118.24399958, - 1042.92983953, - 947.62620298, - 1199.47283961, - 1999.79090914, - 2369.22292565, - 2296.82608323, - 732.08340178, - 12.11327804, - 1819.01027855, - 1118.92391149, - 2594.14080798 - ], - "stddev": [ - 245.71762908, - 333.00778264, - 395.09249139, - 593.75055589, - 566.4170017, - 861.18399006, - 1086.63139075, - 1117.98170791, - 404.91978886, - 4.77584468, - 1002.58768311, - 761.30323499, - 1231.58581042 - ], - "count": null, - "valid_percent": null - }, - "norm_with_clip_values": null, - "pre_processing_function": "torchgeo.datamodules.eurosat.EuroSATDataModule.collate_fn" - } + "type": "Feature", + "stac_version": "1.0.0", + "id": "resnet-18_sentinel-2_all_moco_classification", + "geometry": { + "type": "Polygon", + "coordinates": [ + [ + [ + -7.882190080512502, + 37.13739173208318 ], - "mlm:output": [ - { - "task": "classification", - "result_array": [ - { - "shape": [ - -1, - 10 - ], - "dim_names": [ - "batch", - "class" - ], - "data_type": "float32" - } - ], - 
"classification_classes": [ - { - "value": 0, - "name": "Annual Crop", - "description": null, - "title": null, - "color_hint": null, - "nodata": false - }, - { - "value": 1, - "name": "Forest", - "description": null, - "title": null, - "color_hint": null, - "nodata": false - }, - { - "value": 2, - "name": "Herbaceous Vegetation", - "description": null, - "title": null, - "color_hint": null, - "nodata": false - }, - { - "value": 3, - "name": "Highway", - "description": null, - "title": null, - "color_hint": null, - "nodata": false - }, - { - "value": 4, - "name": "Industrial Buildings", - "description": null, - "title": null, - "color_hint": null, - "nodata": false - }, - { - "value": 5, - "name": "Pasture", - "description": null, - "title": null, - "color_hint": null, - "nodata": false - }, - { - "value": 6, - "name": "Permanent Crop", - "description": null, - "title": null, - "color_hint": null, - "nodata": false - }, - { - "value": 7, - "name": "Residential Buildings", - "description": null, - "title": null, - "color_hint": null, - "nodata": false - }, - { - "value": 8, - "name": "River", - "description": null, - "title": null, - "color_hint": null, - "nodata": false - }, - { - "value": 9, - "name": "SeaLake", - "description": null, - "title": null, - "color_hint": null, - "nodata": false - } - ], - "post_processing_function": null - } + [ + -7.882190080512502, + 58.21798141355221 ], - "mlm:runtime": [ - { - "asset": { - "href": "https://huggingface.co/torchgeo/resnet18_sentinel2_all_moco/resolve/main/resnet18_sentinel2_all_moco-59bfdff9.pth", - "title": "Pytorch weights checkpoint", - "description": "A Resnet-18 classification model trained on normalized Sentinel-2 imagery with Eurosat landcover labels with torchgeo", - "type": ".pth", - "roles": [ - "weights" - ] - }, - "source_code": { - "href": "https://github.com/microsoft/torchgeo/blob/61efd2e2c4df7ebe3bd03002ebbaeaa3cfe9885a/torchgeo/models/resnet.py#L207", - "title": null, - "description": null, - "type": 
null, - "roles": null - }, - "accelerator": "cuda", - "accelerator_constrained": false, - "hardware_summary": "Unknown", - "container": null, - "commit_hash": "61efd2e2c4df7ebe3bd03002ebbaeaa3cfe9885a", - "batch_size_suggestion": null - } + [ + 27.911651652899923, + 58.21798141355221 ], - "mlm:total_parameters": 11700000, - "mlm:pretrained_source": "EuroSat Sentinel-2", - "mlm:summary": "Sourced from torchgeo python library,identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO", - "datetime": null - }, - "geometry": "{\n  \"type\": \"Polygon\",\n  \"coordinates\": [\n    [\n      [\n        -7.882190080512502,\n        37.13739173208318\n      ],\n      [\n        -7.882190080512502,\n        58.21798141355221\n      ],\n      [\n        27.911651652899923,\n        58.21798141355221\n      ],\n      [\n        27.911651652899923,\n        37.13739173208318\n      ],\n      [\n        -7.882190080512502,\n        37.13739173208318\n      ]\n    ]\n  ]\n}", - "links": [ - { - "rel": "derived_from", - "href": "https://earth-search.aws.element84.com/v1/collections/sentinel-2-l2a", - "type": "application/json" - } + [ + 27.911651652899923, + 37.13739173208318 + ], + [ + -7.882190080512502, + 37.13739173208318 + ] + ] + ] + }, + "properties": { + "start_datetime": "1900-01-01T00:00:00Z", + "end_datetime": "9999-12-31T23:59:59Z", + "mlm:name": "Resnet-18 Sentinel-2 ALL MOCO", + "mlm:tasks": [ + "classification" ], - "assets": {}, - "bbox": [ - -7.882190080512502, - 37.13739173208318, - 27.911651652899923, - 58.21798141355221 + "mlm:framework": "pytorch", + "mlm:framework_version": "2.1.2+cu121", + "mlm:file_size": 43000000, + "mlm:memory_size": 1, + "mlm:total_parameters": 11700000, + "mlm:pretrained_source": "EuroSat Sentinel-2", + "mlm:summary": "Sourced from torchgeo python library, identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO", + "mlm:accelerator": "cuda", + "mlm:accelerator_constrained": false, + "mlm:accelerator_summary": "Unknown", + "mlm:batch_size_suggestion": null, + "mlm:input": [ + { + "name": "13 Band Sentinel-2 Batch", + "bands": [ + "B01", + "B02", + "B03", + "B04", + "B05", + "B06", + 
"B07", + "B08", + "B8A", + "B09", + "B10", + "B11", + "B12" + ], + "input": { + "shape": [ + -1, + 13, + 64, + 64 + ], + "dim_order": [ + "batch", + "channel", + "height", + "width" + ], + "data_type": "float32" + }, + "norm_by_channel": true, + "norm_type": "z-score", + "resize_type": null, + "parameters": null, + "statistics": { + "minimum": null, + "maximum": null, + "mean": [ + 1354.40546513, + 1118.24399958, + 1042.92983953, + 947.62620298, + 1199.47283961, + 1999.79090914, + 2369.22292565, + 2296.82608323, + 732.08340178, + 12.11327804, + 1819.01027855, + 1118.92391149, + 2594.14080798 + ], + "stddev": [ + 245.71762908, + 333.00778264, + 395.09249139, + 593.75055589, + 566.4170017, + 861.18399006, + 1086.63139075, + 1117.98170791, + 404.91978886, + 4.77584468, + 1002.58768311, + 761.30323499, + 1231.58581042 + ], + "count": null, + "valid_percent": null + }, + "norm_with_clip_values": null, + "pre_processing_function": { + "format": "python", + "expression": "torchgeo.datamodules.eurosat.EuroSATDataModule.collate_fn" + } + } ], - "stac_extensions": [ - "https://raw.githubusercontent.com/crim-ca/dlm-extension/main/json-schema/schema.json" + "mlm:output": [ + { + "name": "classification", + "tasks": [ + "classification" + ], + "result": [ + { + "shape": [ + -1, + 10 + ], + "dim_order": [ + "batch", + "class" + ], + "data_type": "float32" + } + ], + "classification_classes": [ + { + "value": 0, + "name": "Annual Crop", + "description": null, + "title": null, + "color_hint": null, + "nodata": false + }, + { + "value": 1, + "name": "Forest", + "description": null, + "title": null, + "color_hint": null, + "nodata": false + }, + { + "value": 2, + "name": "Herbaceous Vegetation", + "description": null, + "title": null, + "color_hint": null, + "nodata": false + }, + { + "value": 3, + "name": "Highway", + "description": null, + "title": null, + "color_hint": null, + "nodata": false + }, + { + "value": 4, + "name": "Industrial Buildings", + "description": null, + 
"title": null, + "color_hint": null, + "nodata": false + }, + { + "value": 5, + "name": "Pasture", + "description": null, + "title": null, + "color_hint": null, + "nodata": false + }, + { + "value": 6, + "name": "Permanent Crop", + "description": null, + "title": null, + "color_hint": null, + "nodata": false + }, + { + "value": 7, + "name": "Residential Buildings", + "description": null, + "title": null, + "color_hint": null, + "nodata": false + }, + { + "value": 8, + "name": "River", + "description": null, + "title": null, + "color_hint": null, + "nodata": false + }, + { + "value": 9, + "name": "SeaLake", + "description": null, + "title": null, + "color_hint": null, + "nodata": false + } + ], + "post_processing_function": null + } ] -} \ No newline at end of file + }, + "links": [ + { + "rel": "derived_from", + "href": "https://earth-search.aws.element84.com/v1/collections/sentinel-2-l2a", + "type": "application/json" + } + ], + "assets": { + "weights": { + "href": "https://huggingface.co/torchgeo/resnet18_sentinel2_all_moco/resolve/main/resnet18_sentinel2_all_moco-59bfdff9.pth", + "title": "Pytorch weights checkpoint", + "description": "A Resnet-18 classification model trained on normalized Sentinel-2 imagery with Eurosat landcover labels with torchgeo", + "type": ".pth", + "roles": [ + "mlm:model", + "mlm:weights" + ] + }, + "source_code": { + "href": "https://github.com/microsoft/torchgeo/blob/61efd2e2c4df7ebe3bd03002ebbaeaa3cfe9885a/torchgeo/models/resnet.py#L207", + "title": null, + "description": null, + "type": null, + "roles": [ + "mlm:model", + "code", + "metadata" + ] + } + }, + "datetime": null, + "bbox": [ + -7.882190080512502, + 37.13739173208318, + 27.911651652899923, + 58.21798141355221 + ], + "stac_extensions": [ + "https://raw.githubusercontent.com/crim-ca/dlm-extension/main/json-schema/schema.json" + ] +} diff --git a/json-schema/schema.json b/json-schema/schema.json index 487d218..c039238 100644 --- a/json-schema/schema.json +++ 
b/json-schema/schema.json @@ -161,6 +161,9 @@ "mlm:total_parameters": { "$ref": "#/$defs/mlm:total_parameters" }, + "mlm:pretrained": { + "$ref": "#/$defs/mlm:pretrained" + }, "mlm:pretrained_source": { "$ref": "#/$defs/mlm:pretrained_source" }, @@ -242,6 +245,7 @@ }, "mlm:tasks": { "type": "array", + "uniqueItems": true, "items": { "type": "string", "enum": [ @@ -271,6 +275,41 @@ "type": "integer", "minimum": 0 }, + "mlm:pretrained": { + "type": "boolean", + "$comment": "If trained from scratch, the source should be explicitly 'null'. However, omitting the source if pretrained is allowed.", + "if": { + "$comment": "This is the JSON-object 'properties' definition.", + "properties": { + "$comment": "This is the STAC-Item 'properties' field.", + "properties": { + "$comment": "This is the JSON-object 'properties' definition for the STAC Item 'properties' field.", + "properties": { + "$comment": "Required MLM pretraining reference.", + "mlm:pretrained": { + "const": false + } + } + } + } + }, + "then": { + "$comment": "This is the JSON-object 'properties' definition.", + "properties": { + "$comment": "This is the STAC-Item 'properties' field.", + "properties": { + "$comment": "This is the JSON-object 'properties' definition for the STAC Item 'properties' field.", + "required": ["mlm:pretrained_source"], + "properties": { + "$comment": "Required MLM pretraining reference.", + "mlm:pretrained_source": { + "const": null + } + } + } + } + } + }, "mlm:pretrained_source": { "description": "Pre-training dataset reference or training from scratch definition.", "oneOf": [ @@ -358,10 +397,40 @@ "$ref": "#/$defs/ResizeType" }, "statistics": { - "$ref": "#/$defs/statistics" + "$ref": "#/$defs/InputStatistics" }, "pre_processing_function": { - "$ref": "https://stac-extensions.github.io/processing/v1.1.0/schema.json#/definitions/fields/properties/processing:expression" + "$ref": "#/$defs/ProcessingExpression" + } + } + } + }, + "mlm:output": { + "type": "array", + "items": { + 
"title": "Model Output Object", + "type": "object", + "required": [ + "name", + "tasks", + "result" + ], + "properties": { + "name": { + "type": "string", + "minLength": 1 + }, + "tasks": { + "$ref": "#/$defs/mlm:tasks" + }, + "result": { + "$ref": "#/$defs/ResultStructure" + }, + "classification:classes": { + "$ref": "#/$defs/ClassificationClasses" + }, + "post_processing_function": { + "$ref": "#/$defs/ProcessingExpression" } } } @@ -376,21 +445,66 @@ ], "properties": { "shape": { - "type": "array", - "items": { - "type": "integer", - "minimum": -1 - } + "$ref": "#/$defs/DimensionShape" }, "dim_order": { - "type": "string", - "minLength": 1 + "$ref": "#/$defs/DimensionOrder" }, "data_type": { "$ref": "#/$defs/DataType" } } }, + "ResultStructure": { + "title": "Result Structure Object", + "type": "object", + "required": [ + "shape", + "dim_order", + "data_type" + ], + "properties": { + "shape": { + "$ref": "#/$defs/DimensionShape" + }, + "dim_order": { + "$ref": "#/$defs/DimensionOrder" + }, + "data_type": { + "$ref": "#/$defs/DataType" + } + } + }, + "DimensionShape": { + "type": "array", + "minItems": 1, + "items": { + "type": "integer", + "minimum": -1 + } + }, + "DimensionOrder": { + "type": "array", + "minItems": 1, + "uniqueItems": true, + "items": { + "type": "string", + "minLength": 1, + "pattern": "^[a-z-_]+$", + "examples": [ + "batch", + "channel", + "time", + "height", + "width", + "depth", + "token", + "class", + "score", + "confidence" + ] + } + }, "NormalizeType": { "oneOf": [ { @@ -438,6 +552,28 @@ } ] }, + "ClassificationClasses": { + "$comment": "Must allow empty array for outputs that provide other predictions than classes.", + "oneOf": [ + { + "$ref": "https://stac-extensions.github.io/classification/v1.1.0/schema.json#/definitions/fields/properties/classification:classes" + }, + { + "type": "array", + "maxItems": 0 + } + ] + }, + "ProcessingExpression": { + "oneOf": [ + { + "$ref": 
"https://stac-extensions.github.io/processing/v1.1.0/schema.json#/definitions/fields/properties/processing:expression" + }, + { + "type": "null" + } + ] + }, "DataType": { "$ref": "https://stac-extensions.github.io/raster/v1.1.0/schema.json#/definitions/bands/items/properties/data_type" }, @@ -579,438 +715,6 @@ } ] } - }, - - "dlm:runtime": { - "title": "Execution environment", - "description": "Describe the execution environment", - "type": "object", - "required": [ - "framework", - "version", - "model_handler", - "model_src_url", - "requirement_file" - ], - "properties": { - - "docker": { - "title": "Docker runtime specifications", - "type": "object", - "anyOf": [ - { - "required": [ - "docker_file", - "gpu", - "working_dir", - "run" - ] - }, - { - "required": [ - "image_name", - "gpu", - "working_dir", - "run" - ] - } - ], - "properties": { - "docker_runtime": { - "anyOf": [ - { - "docker_file": { - "title": "Docker file url", - "type": "string" - }, - "docker_image": { - "title": "Docker image url", - "type": "string" - } - } - ] - }, - "gpu": { - "title": "Docker runtime requires a gpu", - "type": "boolean" - }, - "image_name": { - "title": "Docker image name", - "type": "string" - }, - "tag": { - "title": "Docker image tag", - "type": "string" - }, - "working_dir": { - "title": "Docker container working dir", - "type": "string" - }, - "run": { - "title": "Docker run parameters", - "type": "string" - } - } - } - } - }, - "dlm:architecture": { - "title": "Model architecture description", - "description": "Describe the model architecture", - "type": "object", - "required": [ - "total_nb_parameters", - "estimated_total_size_mb", - "type", - "pretrained" - ], - "properties": { - "total_nb_parameters": { - "title": "Total number of parameters", - "type": "integer" - }, - "estimated_total_size_mb": { - "title": "Estimated memory size in MB", - "type": "number" - }, - "type": { - "title": "Type of architecture", - "type": "string" - }, - "summary": { - "title": 
"Summary of the architecture", - "type": "string", - "examples": [ - { - "$ref": "https://raw.githubusercontent.com/crim-ca/dlm-extension/main/examples/model-arch-summary.txt" - } - ] - }, - "pretrained": { - "title": "Pre-training", - "type": "string" - } - } - }, - "dlm:inputs": { - "title": "Description of the input tensor", - "type": "object", - "description": "Describe the inputs required by the model", - "required": [ - "name", - "scaling_factor", - "normalization:mean", - "normalization:std", - "selected_bands", - "input_tensors" - ], - "properties": { - "name": { - "title": "Python name of the tensor", - "type": "string" - }, - "scaling_factor": { - "title": "Scaling factor", - "description": "Scaling factor to be applied on the data in order to bring the range of values between 0 and 1", - "type": "number", - "exclusiveMinimum": 0 - }, - "normalization:mean": { - "title": "Statistical mean", - "type": "array", - "minItems": 1, - "items": { - "type": "number" - } - }, - "normalization:std": { - "title": "Statistical standard-deviation", - "type": "array", - "minItems": 1, - "items": { - "type": "number", - "exclusiveMinimum": 0 - } - }, - "selected_bands": { - "title": "Selected bands", - "type": "array", - "minItems": 1, - "items": { - "type": "integer" - } - }, - "pre_processing_function": { - "title": "Pre-processing function", - "description": "Pre-processing Python function transforming the EO data to a ML-ready tensor", - "type": "string" - }, - "input_tensors": { - "title": "Shape of the input tensor", - "description": "Describe the dimensions of the input tensors", - "type": "array", - "minItems": 1, - "items": { - "properties": { - "batch": { - "title": "Batch size", - "type": "integer", - "minimum": 1 - }, - "dim": { - "title": "Number of channels", - "type": "integer", - "minimum": 1 - }, - "height": { - "title": "Height", - "type": "integer", - "minimum": 1 - }, - "width": { - "title": "Width", - "type": "integer", - "minimum": 1 - } - } - } - } 
- } - }, - "dlm:outputs": { - "title": "Description of the outputs of the model", - "type": "object", - "description": "Describe the outputs of the model", - "required": [ - "task", - "number_of_classes", - "final_layer_size", - "class_name_mapping" - ], - "properties": { - "task": { - "title": "Task name", - "type": "string", - "enum": [ - "semantic segmentation", - "classification", - "object detection", - "object segmentation" - ] - }, - "number_of_classes": { - "title": "number of classes", - "type": "integer", - "minimum": 1 - }, - "final_layer_size": { - "title": "Output size", - "description": "Size of the tensor from the top layer", - "type": "array", - "minItems": 1, - "items": { - "type": "integer", - "exclusiveMinimum": 0 - } - }, - "dont_care_index": { - "title": "Index of 'dont-care' class", - "description": "In case a 'dont-care' class is used", - "type": "integer" - }, - "post_processing_function": { - "title": "Name of the post-processing file", - "description": "Name of the python file containing a post-processing function", - "type": "string" - }, - "class_name_mapping": { - "description": "This is a lookup table mapping the model output (index) to a class name", - "oneOf": [ - { - "$ref": "#/$defs/dlm:class_name_listing" - }, - { - "$ref": "#/$defs/dlm:class_name_mapping" - } - ] - } - } - }, - "dlm:class_name_listing": { - "deprecated": true, - "type": "array", - "minItems": 1, - "items": { - "oneOf": [ - { - "type": "object", - "properties": { - "index": { - "title": "Class index", - "type": "integer", - "minimum": 0 - }, - "class_name": { - "title": "Class name", - "type": "string" - } - } - }, - { - "$ref": "#/$defs/dlm:class_name_mapping" - } - ] - } - }, - "dlm:class_name_mapping": { - "type": "object", - "patternProperties": { - "^I_": { - "description": "Class name to map the index.", - "type": "string" - } - }, - "additionalProperties": false - }, - "dlm:data": { - "title": "Description of the data requirements", - "type": "object", - 
"description": "Describe the eo data compatible with the model", - "required": [ - "process_level", - "data_type", - "number_of_bands", - "class_name_mapping" - ], - "properties": { - "process_ level": { - "title": "Data processing level", - "description": "Describe the processing level expected", - "type": "string", - "enum": [ - "raw", - "ortho", - "L0", - "L1", - "L2", - "L3" - ] - }, - "nodata": { - "title": "no data value", - "description": "Sometimes datasets have no data value, this value should be ignored", - "type": "number" - }, - "item_examples": { - "title": "item examples", - "description": "Link to additional data records or stac items", - "type": "array", - "minItems": 1, - "items": { - "properties": { - "url": { - "title": "Link toward an item", - "type": "string" - }, - "title": { - "title": "item description", - "type": "string" - } - } - } - }, - "number_of_bands": { - "title": "number of bands", - "description": "Number of spectral bands expected in the eo data", - "type": "number", - "minimum": 1 - }, - "useful_bands": { - "title": "Useful bands", - "description": "Describe the spectral bands required by the model", - "type": "array", - "minItems": 1, - "items": { - "properties": { - "index": { - "title": "Index of the spectral band", - "description": "Index of the band in the original dataset", - "type": "integer", - "minimum": 0 - }, - "name": { - "title": "Short name of the band", - "type": "string" - } - } - } - }, - "data_type": { - "title": "Data type", - "description": "Data type according to numpy", - "type": "string", - "enum": [ - "byte", - "short", - "intc", - "int_", - "longlong", - "int8", - "int16", - "int32", - "int64", - "ubyte", - "ushort", - "uintc", - "uint", - "ulonglong", - "uint8", - "uint16", - "uint32", - "uint64", - "half", - "double", - "float_", - "longfloat", - "float16", - "float32", - "float64", - "float96", - "float128" - ] - }, - "test_file": { - "title": "Test file", - "type": "string", - "description": "this 
test file is a data sample" - } - } - }, - "dlm:archive": { - "title": "Description of the archive content", - "description": "Describe the model archive content", - "type": "array", - "minItems": 1, - "items": { - "properties": { - "name": { - "title": "File name", - "type": "string" - }, - "role": { - "title": "Role of the file", - "type": "string", - "enum": [ - "dependency", - "handling function", - "model weight", - "config file", - "test set", - "other" - ] - } - } - } } } } diff --git a/poetry.lock b/poetry.lock index b06d9d6..f01c4cb 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.7.0 and should not be changed by hand. [[package]] name = "annotated-types" @@ -13,13 +13,13 @@ files = [ [[package]] name = "bandit" -version = "1.7.7" +version = "1.7.8" description = "Security oriented static analyser for python code." optional = false python-versions = ">=3.8" files = [ - {file = "bandit-1.7.7-py3-none-any.whl", hash = "sha256:17e60786a7ea3c9ec84569fd5aee09936d116cb0cb43151023258340dbffb7ed"}, - {file = "bandit-1.7.7.tar.gz", hash = "sha256:527906bec6088cb499aae31bc962864b4e77569e9d529ee51df3a93b4b8ab28a"}, + {file = "bandit-1.7.8-py3-none-any.whl", hash = "sha256:509f7af645bc0cd8fd4587abc1a038fc795636671ee8204d502b933aee44f381"}, + {file = "bandit-1.7.8.tar.gz", hash = "sha256:36de50f720856ab24a24dbaa5fee2c66050ed97c1477e0a1159deab1775eab6b"}, ] [package.dependencies] @@ -30,6 +30,7 @@ stevedore = ">=1.20.0" [package.extras] baseline = ["GitPython (>=3.1.30)"] +sarif = ["jschema-to-python (>=1.2.3)", "sarif-om (>=1.0.4)"] test = ["beautifulsoup4 (>=4.8.0)", "coverage (>=4.5.4)", "fixtures (>=3.0.0)", "flake8 (>=4.0.0)", "pylint (==1.9.4)", "stestr (>=2.5.0)", "testscenarios (>=0.5.0)", "testtools (>=2.3.0)"] toml = ["tomli (>=1.1.0)"] yaml = ["PyYAML"] @@ -182,63 +183,63 @@ files = [ [[package]] name = 
"coverage" -version = "7.4.3" +version = "7.4.4" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.8" files = [ - {file = "coverage-7.4.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8580b827d4746d47294c0e0b92854c85a92c2227927433998f0d3320ae8a71b6"}, - {file = "coverage-7.4.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:718187eeb9849fc6cc23e0d9b092bc2348821c5e1a901c9f8975df0bc785bfd4"}, - {file = "coverage-7.4.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:767b35c3a246bcb55b8044fd3a43b8cd553dd1f9f2c1eeb87a302b1f8daa0524"}, - {file = "coverage-7.4.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae7f19afe0cce50039e2c782bff379c7e347cba335429678450b8fe81c4ef96d"}, - {file = "coverage-7.4.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba3a8aaed13770e970b3df46980cb068d1c24af1a1968b7818b69af8c4347efb"}, - {file = "coverage-7.4.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:ee866acc0861caebb4f2ab79f0b94dbfbdbfadc19f82e6e9c93930f74e11d7a0"}, - {file = "coverage-7.4.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:506edb1dd49e13a2d4cac6a5173317b82a23c9d6e8df63efb4f0380de0fbccbc"}, - {file = "coverage-7.4.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd6545d97c98a192c5ac995d21c894b581f1fd14cf389be90724d21808b657e2"}, - {file = "coverage-7.4.3-cp310-cp310-win32.whl", hash = "sha256:f6a09b360d67e589236a44f0c39218a8efba2593b6abdccc300a8862cffc2f94"}, - {file = "coverage-7.4.3-cp310-cp310-win_amd64.whl", hash = "sha256:18d90523ce7553dd0b7e23cbb28865db23cddfd683a38fb224115f7826de78d0"}, - {file = "coverage-7.4.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cbbe5e739d45a52f3200a771c6d2c7acf89eb2524890a4a3aa1a7fa0695d2a47"}, - {file = "coverage-7.4.3-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:489763b2d037b164846ebac0cbd368b8a4ca56385c4090807ff9fad817de4113"}, - {file = "coverage-7.4.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:451f433ad901b3bb00184d83fd83d135fb682d780b38af7944c9faeecb1e0bfe"}, - {file = "coverage-7.4.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fcc66e222cf4c719fe7722a403888b1f5e1682d1679bd780e2b26c18bb648cdc"}, - {file = "coverage-7.4.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3ec74cfef2d985e145baae90d9b1b32f85e1741b04cd967aaf9cfa84c1334f3"}, - {file = "coverage-7.4.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:abbbd8093c5229c72d4c2926afaee0e6e3140de69d5dcd918b2921f2f0c8baba"}, - {file = "coverage-7.4.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:35eb581efdacf7b7422af677b92170da4ef34500467381e805944a3201df2079"}, - {file = "coverage-7.4.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8249b1c7334be8f8c3abcaaa996e1e4927b0e5a23b65f5bf6cfe3180d8ca7840"}, - {file = "coverage-7.4.3-cp311-cp311-win32.whl", hash = "sha256:cf30900aa1ba595312ae41978b95e256e419d8a823af79ce670835409fc02ad3"}, - {file = "coverage-7.4.3-cp311-cp311-win_amd64.whl", hash = "sha256:18c7320695c949de11a351742ee001849912fd57e62a706d83dfc1581897fa2e"}, - {file = "coverage-7.4.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b51bfc348925e92a9bd9b2e48dad13431b57011fd1038f08316e6bf1df107d10"}, - {file = "coverage-7.4.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d6cdecaedea1ea9e033d8adf6a0ab11107b49571bbb9737175444cea6eb72328"}, - {file = "coverage-7.4.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b2eccb883368f9e972e216c7b4c7c06cabda925b5f06dde0650281cb7666a30"}, - {file = "coverage-7.4.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:6c00cdc8fa4e50e1cc1f941a7f2e3e0f26cb2a1233c9696f26963ff58445bac7"}, - {file = "coverage-7.4.3-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9a4a8dd3dcf4cbd3165737358e4d7dfbd9d59902ad11e3b15eebb6393b0446e"}, - {file = "coverage-7.4.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:062b0a75d9261e2f9c6d071753f7eef0fc9caf3a2c82d36d76667ba7b6470003"}, - {file = "coverage-7.4.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:ebe7c9e67a2d15fa97b77ea6571ce5e1e1f6b0db71d1d5e96f8d2bf134303c1d"}, - {file = "coverage-7.4.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c0a120238dd71c68484f02562f6d446d736adcc6ca0993712289b102705a9a3a"}, - {file = "coverage-7.4.3-cp312-cp312-win32.whl", hash = "sha256:37389611ba54fd6d278fde86eb2c013c8e50232e38f5c68235d09d0a3f8aa352"}, - {file = "coverage-7.4.3-cp312-cp312-win_amd64.whl", hash = "sha256:d25b937a5d9ffa857d41be042b4238dd61db888533b53bc76dc082cb5a15e914"}, - {file = "coverage-7.4.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:28ca2098939eabab044ad68850aac8f8db6bf0b29bc7f2887d05889b17346454"}, - {file = "coverage-7.4.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:280459f0a03cecbe8800786cdc23067a8fc64c0bd51dc614008d9c36e1659d7e"}, - {file = "coverage-7.4.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c0cdedd3500e0511eac1517bf560149764b7d8e65cb800d8bf1c63ebf39edd2"}, - {file = "coverage-7.4.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9a9babb9466fe1da12417a4aed923e90124a534736de6201794a3aea9d98484e"}, - {file = "coverage-7.4.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dec9de46a33cf2dd87a5254af095a409ea3bf952d85ad339751e7de6d962cde6"}, - {file = "coverage-7.4.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:16bae383a9cc5abab9bb05c10a3e5a52e0a788325dc9ba8499e821885928968c"}, - {file = "coverage-7.4.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:2c854ce44e1ee31bda4e318af1dbcfc929026d12c5ed030095ad98197eeeaed0"}, - {file = "coverage-7.4.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ce8c50520f57ec57aa21a63ea4f325c7b657386b3f02ccaedeccf9ebe27686e1"}, - {file = "coverage-7.4.3-cp38-cp38-win32.whl", hash = "sha256:708a3369dcf055c00ddeeaa2b20f0dd1ce664eeabde6623e516c5228b753654f"}, - {file = "coverage-7.4.3-cp38-cp38-win_amd64.whl", hash = "sha256:1bf25fbca0c8d121a3e92a2a0555c7e5bc981aee5c3fdaf4bb7809f410f696b9"}, - {file = "coverage-7.4.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3b253094dbe1b431d3a4ac2f053b6d7ede2664ac559705a704f621742e034f1f"}, - {file = "coverage-7.4.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:77fbfc5720cceac9c200054b9fab50cb2a7d79660609200ab83f5db96162d20c"}, - {file = "coverage-7.4.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6679060424faa9c11808598504c3ab472de4531c571ab2befa32f4971835788e"}, - {file = "coverage-7.4.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4af154d617c875b52651dd8dd17a31270c495082f3d55f6128e7629658d63765"}, - {file = "coverage-7.4.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8640f1fde5e1b8e3439fe482cdc2b0bb6c329f4bb161927c28d2e8879c6029ee"}, - {file = "coverage-7.4.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:69b9f6f66c0af29642e73a520b6fed25ff9fd69a25975ebe6acb297234eda501"}, - {file = "coverage-7.4.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:0842571634f39016a6c03e9d4aba502be652a6e4455fadb73cd3a3a49173e38f"}, - {file = "coverage-7.4.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a78ed23b08e8ab524551f52953a8a05d61c3a760781762aac49f8de6eede8c45"}, - {file = "coverage-7.4.3-cp39-cp39-win32.whl", hash = 
"sha256:c0524de3ff096e15fcbfe8f056fdb4ea0bf497d584454f344d59fce069d3e6e9"}, - {file = "coverage-7.4.3-cp39-cp39-win_amd64.whl", hash = "sha256:0209a6369ccce576b43bb227dc8322d8ef9e323d089c6f3f26a597b09cb4d2aa"}, - {file = "coverage-7.4.3-pp38.pp39.pp310-none-any.whl", hash = "sha256:7cbde573904625509a3f37b6fecea974e363460b556a627c60dc2f47e2fffa51"}, - {file = "coverage-7.4.3.tar.gz", hash = "sha256:276f6077a5c61447a48d133ed13e759c09e62aff0dc84274a68dc18660104d52"}, + {file = "coverage-7.4.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e0be5efd5127542ef31f165de269f77560d6cdef525fffa446de6f7e9186cfb2"}, + {file = "coverage-7.4.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ccd341521be3d1b3daeb41960ae94a5e87abe2f46f17224ba5d6f2b8398016cf"}, + {file = "coverage-7.4.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09fa497a8ab37784fbb20ab699c246053ac294d13fc7eb40ec007a5043ec91f8"}, + {file = "coverage-7.4.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b1a93009cb80730c9bca5d6d4665494b725b6e8e157c1cb7f2db5b4b122ea562"}, + {file = "coverage-7.4.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:690db6517f09336559dc0b5f55342df62370a48f5469fabf502db2c6d1cffcd2"}, + {file = "coverage-7.4.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:09c3255458533cb76ef55da8cc49ffab9e33f083739c8bd4f58e79fecfe288f7"}, + {file = "coverage-7.4.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8ce1415194b4a6bd0cdcc3a1dfbf58b63f910dcb7330fe15bdff542c56949f87"}, + {file = "coverage-7.4.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b91cbc4b195444e7e258ba27ac33769c41b94967919f10037e6355e998af255c"}, + {file = "coverage-7.4.4-cp310-cp310-win32.whl", hash = "sha256:598825b51b81c808cb6f078dcb972f96af96b078faa47af7dfcdf282835baa8d"}, + {file = "coverage-7.4.4-cp310-cp310-win_amd64.whl", hash = 
"sha256:09ef9199ed6653989ebbcaacc9b62b514bb63ea2f90256e71fea3ed74bd8ff6f"}, + {file = "coverage-7.4.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0f9f50e7ef2a71e2fae92774c99170eb8304e3fdf9c8c3c7ae9bab3e7229c5cf"}, + {file = "coverage-7.4.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:623512f8ba53c422fcfb2ce68362c97945095b864cda94a92edbaf5994201083"}, + {file = "coverage-7.4.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0513b9508b93da4e1716744ef6ebc507aff016ba115ffe8ecff744d1322a7b63"}, + {file = "coverage-7.4.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40209e141059b9370a2657c9b15607815359ab3ef9918f0196b6fccce8d3230f"}, + {file = "coverage-7.4.4-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a2b2b78c78293782fd3767d53e6474582f62443d0504b1554370bde86cc8227"}, + {file = "coverage-7.4.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:73bfb9c09951125d06ee473bed216e2c3742f530fc5acc1383883125de76d9cd"}, + {file = "coverage-7.4.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:1f384c3cc76aeedce208643697fb3e8437604b512255de6d18dae3f27655a384"}, + {file = "coverage-7.4.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:54eb8d1bf7cacfbf2a3186019bcf01d11c666bd495ed18717162f7eb1e9dd00b"}, + {file = "coverage-7.4.4-cp311-cp311-win32.whl", hash = "sha256:cac99918c7bba15302a2d81f0312c08054a3359eaa1929c7e4b26ebe41e9b286"}, + {file = "coverage-7.4.4-cp311-cp311-win_amd64.whl", hash = "sha256:b14706df8b2de49869ae03a5ccbc211f4041750cd4a66f698df89d44f4bd30ec"}, + {file = "coverage-7.4.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:201bef2eea65e0e9c56343115ba3814e896afe6d36ffd37bab783261db430f76"}, + {file = "coverage-7.4.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:41c9c5f3de16b903b610d09650e5e27adbfa7f500302718c9ffd1c12cf9d6818"}, + {file = 
"coverage-7.4.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d898fe162d26929b5960e4e138651f7427048e72c853607f2b200909794ed978"}, + {file = "coverage-7.4.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3ea79bb50e805cd6ac058dfa3b5c8f6c040cb87fe83de10845857f5535d1db70"}, + {file = "coverage-7.4.4-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce4b94265ca988c3f8e479e741693d143026632672e3ff924f25fab50518dd51"}, + {file = "coverage-7.4.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:00838a35b882694afda09f85e469c96367daa3f3f2b097d846a7216993d37f4c"}, + {file = "coverage-7.4.4-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:fdfafb32984684eb03c2d83e1e51f64f0906b11e64482df3c5db936ce3839d48"}, + {file = "coverage-7.4.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:69eb372f7e2ece89f14751fbcbe470295d73ed41ecd37ca36ed2eb47512a6ab9"}, + {file = "coverage-7.4.4-cp312-cp312-win32.whl", hash = "sha256:137eb07173141545e07403cca94ab625cc1cc6bc4c1e97b6e3846270e7e1fea0"}, + {file = "coverage-7.4.4-cp312-cp312-win_amd64.whl", hash = "sha256:d71eec7d83298f1af3326ce0ff1d0ea83c7cb98f72b577097f9083b20bdaf05e"}, + {file = "coverage-7.4.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d5ae728ff3b5401cc320d792866987e7e7e880e6ebd24433b70a33b643bb0384"}, + {file = "coverage-7.4.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cc4f1358cb0c78edef3ed237ef2c86056206bb8d9140e73b6b89fbcfcbdd40e1"}, + {file = "coverage-7.4.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8130a2aa2acb8788e0b56938786c33c7c98562697bf9f4c7d6e8e5e3a0501e4a"}, + {file = "coverage-7.4.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf271892d13e43bc2b51e6908ec9a6a5094a4df1d8af0bfc360088ee6c684409"}, + {file = 
"coverage-7.4.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a4cdc86d54b5da0df6d3d3a2f0b710949286094c3a6700c21e9015932b81447e"}, + {file = "coverage-7.4.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ae71e7ddb7a413dd60052e90528f2f65270aad4b509563af6d03d53e979feafd"}, + {file = "coverage-7.4.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:38dd60d7bf242c4ed5b38e094baf6401faa114fc09e9e6632374388a404f98e7"}, + {file = "coverage-7.4.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa5b1c1bfc28384f1f53b69a023d789f72b2e0ab1b3787aae16992a7ca21056c"}, + {file = "coverage-7.4.4-cp38-cp38-win32.whl", hash = "sha256:dfa8fe35a0bb90382837b238fff375de15f0dcdb9ae68ff85f7a63649c98527e"}, + {file = "coverage-7.4.4-cp38-cp38-win_amd64.whl", hash = "sha256:b2991665420a803495e0b90a79233c1433d6ed77ef282e8e152a324bbbc5e0c8"}, + {file = "coverage-7.4.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3b799445b9f7ee8bf299cfaed6f5b226c0037b74886a4e11515e569b36fe310d"}, + {file = "coverage-7.4.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b4d33f418f46362995f1e9d4f3a35a1b6322cb959c31d88ae56b0298e1c22357"}, + {file = "coverage-7.4.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aadacf9a2f407a4688d700e4ebab33a7e2e408f2ca04dbf4aef17585389eff3e"}, + {file = "coverage-7.4.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7c95949560050d04d46b919301826525597f07b33beba6187d04fa64d47ac82e"}, + {file = "coverage-7.4.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff7687ca3d7028d8a5f0ebae95a6e4827c5616b31a4ee1192bdfde697db110d4"}, + {file = "coverage-7.4.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5fc1de20b2d4a061b3df27ab9b7c7111e9a710f10dc2b84d33a4ab25065994ec"}, + {file = "coverage-7.4.4-cp39-cp39-musllinux_1_1_i686.whl", hash = 
"sha256:c74880fc64d4958159fbd537a091d2a585448a8f8508bf248d72112723974cbd"}, + {file = "coverage-7.4.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:742a76a12aa45b44d236815d282b03cfb1de3b4323f3e4ec933acfae08e54ade"}, + {file = "coverage-7.4.4-cp39-cp39-win32.whl", hash = "sha256:d89d7b2974cae412400e88f35d86af72208e1ede1a541954af5d944a8ba46c57"}, + {file = "coverage-7.4.4-cp39-cp39-win_amd64.whl", hash = "sha256:9ca28a302acb19b6af89e90f33ee3e1906961f94b54ea37de6737b7ca9d8827c"}, + {file = "coverage-7.4.4-pp38.pp39.pp310-none-any.whl", hash = "sha256:b2c5edc4ac10a7ef6605a966c58929ec6c1bd0917fb8c15cb3363f65aa40e677"}, + {file = "coverage-7.4.4.tar.gz", hash = "sha256:c901df83d097649e257e803be22592aedfd5182f07b3cc87d640bbb9afd50f49"}, ] [package.dependencies] @@ -304,18 +305,18 @@ test = ["pytest (>=6)"] [[package]] name = "filelock" -version = "3.13.1" +version = "3.13.3" description = "A platform independent file lock." optional = false python-versions = ">=3.8" files = [ - {file = "filelock-3.13.1-py3-none-any.whl", hash = "sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c"}, - {file = "filelock-3.13.1.tar.gz", hash = "sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e"}, + {file = "filelock-3.13.3-py3-none-any.whl", hash = "sha256:5ffa845303983e7a0b7ae17636509bc97997d58afeafa72fb141a17b152284cb"}, + {file = "filelock-3.13.3.tar.gz", hash = "sha256:a79895a25bbefdf55d1a2a0a80968f7dbb28edcd6d4234a0afb3f37ecde4b546"}, ] [package.extras] -docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.24)"] -testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)"] +docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8.0.1)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", 
"pytest-timeout (>=2.2)"] typing = ["typing-extensions (>=4.8)"] [[package]] @@ -506,13 +507,13 @@ files = [ [[package]] name = "packaging" -version = "23.2" +version = "24.0" description = "Core utilities for Python packages" optional = false python-versions = ">=3.7" files = [ - {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"}, - {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"}, + {file = "packaging-24.0-py3-none-any.whl", hash = "sha256:2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5"}, + {file = "packaging-24.0.tar.gz", hash = "sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9"}, ] [[package]] @@ -598,18 +599,18 @@ files = [ [[package]] name = "pydantic" -version = "2.6.3" +version = "2.7.0b1" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.6.3-py3-none-any.whl", hash = "sha256:72c6034df47f46ccdf81869fddb81aade68056003900a8724a4f160700016a2a"}, - {file = "pydantic-2.6.3.tar.gz", hash = "sha256:e07805c4c7f5c6826e33a1d4c9d47950d7eaf34868e2690f8594d2e30241f11f"}, + {file = "pydantic-2.7.0b1-py3-none-any.whl", hash = "sha256:c9cfcbfac6177f9e988fcffa727c42164ad03c3c8cd128057553c2d724fb6556"}, + {file = "pydantic-2.7.0b1.tar.gz", hash = "sha256:b0b45e2f249f7a304a8a3b724e03b206bd23ad584669fe31dbb3e38199fc9ff7"}, ] [package.dependencies] annotated-types = ">=0.4.0" -pydantic-core = "2.16.3" +pydantic-core = "2.18.0" typing-extensions = ">=4.6.1" [package.extras] @@ -617,90 +618,90 @@ email = ["email-validator (>=2.0.0)"] [[package]] name = "pydantic-core" -version = "2.16.3" -description = "" +version = "2.18.0" +description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.16.3-cp310-cp310-macosx_10_12_x86_64.whl", 
hash = "sha256:75b81e678d1c1ede0785c7f46690621e4c6e63ccd9192af1f0bd9d504bbb6bf4"}, - {file = "pydantic_core-2.16.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9c865a7ee6f93783bd5d781af5a4c43dadc37053a5b42f7d18dc019f8c9d2bd1"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:162e498303d2b1c036b957a1278fa0899d02b2842f1ff901b6395104c5554a45"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2f583bd01bbfbff4eaee0868e6fc607efdfcc2b03c1c766b06a707abbc856187"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b926dd38db1519ed3043a4de50214e0d600d404099c3392f098a7f9d75029ff8"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:716b542728d4c742353448765aa7cdaa519a7b82f9564130e2b3f6766018c9ec"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc4ad7f7ee1a13d9cb49d8198cd7d7e3aa93e425f371a68235f784e99741561f"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bd87f48924f360e5d1c5f770d6155ce0e7d83f7b4e10c2f9ec001c73cf475c99"}, - {file = "pydantic_core-2.16.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0df446663464884297c793874573549229f9eca73b59360878f382a0fc085979"}, - {file = "pydantic_core-2.16.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4df8a199d9f6afc5ae9a65f8f95ee52cae389a8c6b20163762bde0426275b7db"}, - {file = "pydantic_core-2.16.3-cp310-none-win32.whl", hash = "sha256:456855f57b413f077dff513a5a28ed838dbbb15082ba00f80750377eed23d132"}, - {file = "pydantic_core-2.16.3-cp310-none-win_amd64.whl", hash = "sha256:732da3243e1b8d3eab8c6ae23ae6a58548849d2e4a4e03a1924c8ddf71a387cb"}, - {file = "pydantic_core-2.16.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = 
"sha256:519ae0312616026bf4cedc0fe459e982734f3ca82ee8c7246c19b650b60a5ee4"}, - {file = "pydantic_core-2.16.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b3992a322a5617ded0a9f23fd06dbc1e4bd7cf39bc4ccf344b10f80af58beacd"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d62da299c6ecb04df729e4b5c52dc0d53f4f8430b4492b93aa8de1f541c4aac"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2acca2be4bb2f2147ada8cac612f8a98fc09f41c89f87add7256ad27332c2fda"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1b662180108c55dfbf1280d865b2d116633d436cfc0bba82323554873967b340"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e7c6ed0dc9d8e65f24f5824291550139fe6f37fac03788d4580da0d33bc00c97"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6b1bb0827f56654b4437955555dc3aeeebeddc47c2d7ed575477f082622c49e"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e56f8186d6210ac7ece503193ec84104da7ceb98f68ce18c07282fcc2452e76f"}, - {file = "pydantic_core-2.16.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:936e5db01dd49476fa8f4383c259b8b1303d5dd5fb34c97de194560698cc2c5e"}, - {file = "pydantic_core-2.16.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:33809aebac276089b78db106ee692bdc9044710e26f24a9a2eaa35a0f9fa70ba"}, - {file = "pydantic_core-2.16.3-cp311-none-win32.whl", hash = "sha256:ded1c35f15c9dea16ead9bffcde9bb5c7c031bff076355dc58dcb1cb436c4721"}, - {file = "pydantic_core-2.16.3-cp311-none-win_amd64.whl", hash = "sha256:d89ca19cdd0dd5f31606a9329e309d4fcbb3df860960acec32630297d61820df"}, - {file = "pydantic_core-2.16.3-cp311-none-win_arm64.whl", hash = 
"sha256:6162f8d2dc27ba21027f261e4fa26f8bcb3cf9784b7f9499466a311ac284b5b9"}, - {file = "pydantic_core-2.16.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:0f56ae86b60ea987ae8bcd6654a887238fd53d1384f9b222ac457070b7ac4cff"}, - {file = "pydantic_core-2.16.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c9bd22a2a639e26171068f8ebb5400ce2c1bc7d17959f60a3b753ae13c632975"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4204e773b4b408062960e65468d5346bdfe139247ee5f1ca2a378983e11388a2"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f651dd19363c632f4abe3480a7c87a9773be27cfe1341aef06e8759599454120"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aaf09e615a0bf98d406657e0008e4a8701b11481840be7d31755dc9f97c44053"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8e47755d8152c1ab5b55928ab422a76e2e7b22b5ed8e90a7d584268dd49e9c6b"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:500960cb3a0543a724a81ba859da816e8cf01b0e6aaeedf2c3775d12ee49cade"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cf6204fe865da605285c34cf1172879d0314ff267b1c35ff59de7154f35fdc2e"}, - {file = "pydantic_core-2.16.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d33dd21f572545649f90c38c227cc8631268ba25c460b5569abebdd0ec5974ca"}, - {file = "pydantic_core-2.16.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:49d5d58abd4b83fb8ce763be7794d09b2f50f10aa65c0f0c1696c677edeb7cbf"}, - {file = "pydantic_core-2.16.3-cp312-none-win32.whl", hash = "sha256:f53aace168a2a10582e570b7736cc5bef12cae9cf21775e3eafac597e8551fbe"}, - {file = "pydantic_core-2.16.3-cp312-none-win_amd64.whl", hash = 
"sha256:0d32576b1de5a30d9a97f300cc6a3f4694c428d956adbc7e6e2f9cad279e45ed"}, - {file = "pydantic_core-2.16.3-cp312-none-win_arm64.whl", hash = "sha256:ec08be75bb268473677edb83ba71e7e74b43c008e4a7b1907c6d57e940bf34b6"}, - {file = "pydantic_core-2.16.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:b1f6f5938d63c6139860f044e2538baeee6f0b251a1816e7adb6cbce106a1f01"}, - {file = "pydantic_core-2.16.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2a1ef6a36fdbf71538142ed604ad19b82f67b05749512e47f247a6ddd06afdc7"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:704d35ecc7e9c31d48926150afada60401c55efa3b46cd1ded5a01bdffaf1d48"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d937653a696465677ed583124b94a4b2d79f5e30b2c46115a68e482c6a591c8a"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c9803edf8e29bd825f43481f19c37f50d2b01899448273b3a7758441b512acf8"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:72282ad4892a9fb2da25defeac8c2e84352c108705c972db82ab121d15f14e6d"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f752826b5b8361193df55afcdf8ca6a57d0232653494ba473630a83ba50d8c9"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4384a8f68ddb31a0b0c3deae88765f5868a1b9148939c3f4121233314ad5532c"}, - {file = "pydantic_core-2.16.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a4b2bf78342c40b3dc830880106f54328928ff03e357935ad26c7128bbd66ce8"}, - {file = "pydantic_core-2.16.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:13dcc4802961b5f843a9385fc821a0b0135e8c07fc3d9949fd49627c1a5e6ae5"}, - {file = "pydantic_core-2.16.3-cp38-none-win32.whl", hash = "sha256:e3e70c94a0c3841e6aa831edab1619ad5c511199be94d0c11ba75fe06efe107a"}, - 
{file = "pydantic_core-2.16.3-cp38-none-win_amd64.whl", hash = "sha256:ecdf6bf5f578615f2e985a5e1f6572e23aa632c4bd1dc67f8f406d445ac115ed"}, - {file = "pydantic_core-2.16.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:bda1ee3e08252b8d41fa5537413ffdddd58fa73107171a126d3b9ff001b9b820"}, - {file = "pydantic_core-2.16.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:21b888c973e4f26b7a96491c0965a8a312e13be108022ee510248fe379a5fa23"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be0ec334369316fa73448cc8c982c01e5d2a81c95969d58b8f6e272884df0074"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b5b6079cc452a7c53dd378c6f881ac528246b3ac9aae0f8eef98498a75657805"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ee8d5f878dccb6d499ba4d30d757111847b6849ae07acdd1205fffa1fc1253c"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7233d65d9d651242a68801159763d09e9ec96e8a158dbf118dc090cd77a104c9"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c6119dc90483a5cb50a1306adb8d52c66e447da88ea44f323e0ae1a5fcb14256"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:578114bc803a4c1ff9946d977c221e4376620a46cf78da267d946397dc9514a8"}, - {file = "pydantic_core-2.16.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d8f99b147ff3fcf6b3cc60cb0c39ea443884d5559a30b1481e92495f2310ff2b"}, - {file = "pydantic_core-2.16.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4ac6b4ce1e7283d715c4b729d8f9dab9627586dafce81d9eaa009dd7f25dd972"}, - {file = "pydantic_core-2.16.3-cp39-none-win32.whl", hash = "sha256:e7774b570e61cb998490c5235740d475413a1f6de823169b4cf94e2fe9e9f6b2"}, - {file = "pydantic_core-2.16.3-cp39-none-win_amd64.whl", hash = 
"sha256:9091632a25b8b87b9a605ec0e61f241c456e9248bfdcf7abdf344fdb169c81cf"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:36fa178aacbc277bc6b62a2c3da95226520da4f4e9e206fdf076484363895d2c"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:dcca5d2bf65c6fb591fff92da03f94cd4f315972f97c21975398bd4bd046854a"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a72fb9963cba4cd5793854fd12f4cfee731e86df140f59ff52a49b3552db241"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b60cc1a081f80a2105a59385b92d82278b15d80ebb3adb200542ae165cd7d183"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cbcc558401de90a746d02ef330c528f2e668c83350f045833543cd57ecead1ad"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:fee427241c2d9fb7192b658190f9f5fd6dfe41e02f3c1489d2ec1e6a5ab1e04a"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f4cb85f693044e0f71f394ff76c98ddc1bc0953e48c061725e540396d5c8a2e1"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b29eeb887aa931c2fcef5aa515d9d176d25006794610c264ddc114c053bf96fe"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a425479ee40ff021f8216c9d07a6a3b54b31c8267c6e17aa88b70d7ebd0e5e5b"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:5c5cbc703168d1b7a838668998308018a2718c2130595e8e190220238addc96f"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99b6add4c0b39a513d323d3b93bc173dac663c27b99860dd5bf491b240d26137"}, - {file = 
"pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75f76ee558751746d6a38f89d60b6228fa174e5172d143886af0f85aa306fd89"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:00ee1c97b5364b84cb0bd82e9bbf645d5e2871fb8c58059d158412fee2d33d8a"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:287073c66748f624be4cef893ef9174e3eb88fe0b8a78dc22e88eca4bc357ca6"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ed25e1835c00a332cb10c683cd39da96a719ab1dfc08427d476bce41b92531fc"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:86b3d0033580bd6bbe07590152007275bd7af95f98eaa5bd36f3da219dcd93da"}, - {file = "pydantic_core-2.16.3.tar.gz", hash = "sha256:1cac689f80a3abab2d3c0048b29eea5751114054f032a941a32de4c852c59cad"}, + {file = "pydantic_core-2.18.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:c36ee17f0b85e98d5488a60bd4a022cb1e82f1995cc891bb371c1a15a52e5833"}, + {file = "pydantic_core-2.18.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3992d08de5ece507d334f166bd489eef46226ae26ecf890338a6bca710042d5e"}, + {file = "pydantic_core-2.18.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3e9cc290c91f300a435f7c8dca9ce8e492fb2f3c57dddef7aa8e56e5d33f962"}, + {file = "pydantic_core-2.18.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1d68106d504c34bc9971e6eca22ef603a95b4531449ee8460f136bc6a77dc7a3"}, + {file = "pydantic_core-2.18.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b5b8c26d5152be544ec9fcbac5087ffef1f3b831d0cba168016ac7e6063a29a"}, + {file = "pydantic_core-2.18.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:84eee9cd65aadba8aa45d3a5f7ce09a9263d2c1788dbb6d40f4f5345f76f97a6"}, + {file = 
"pydantic_core-2.18.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac856c69bd2aefcaa1c29ebb7d3c191e9de7aad063284c1e760c43983ad18c3a"}, + {file = "pydantic_core-2.18.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1b4a5de4867c582aa61ea7c83d977b9243c264c7e6c45d8b61dfb0f2bd243395"}, + {file = "pydantic_core-2.18.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6559ffbf66fae9d333aaf8c34b67e83912999781120c90e6aed59ae6077ed74f"}, + {file = "pydantic_core-2.18.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4e04cb00fdb79b6b8e1085d2152e1a2dfa21640f6a962476740e1542e07e8b0f"}, + {file = "pydantic_core-2.18.0-cp310-none-win32.whl", hash = "sha256:a83fd7a2983c9bb6bd1aec7257a8a96c29d48c30d9d8b8ae13a44155163dd42d"}, + {file = "pydantic_core-2.18.0-cp310-none-win_amd64.whl", hash = "sha256:5d8f4e95917439ba4398d9e2ce2be9f5840e91ea63ae018b3b148d48e99e99c1"}, + {file = "pydantic_core-2.18.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a99333701e0cf16ac8a646c92d5b9dc9f8cadd0a026f50bf0ddde34eede70bc3"}, + {file = "pydantic_core-2.18.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:647ce6122e6ae6b972076793851efd284c4b51b93ed4071d6735bcf44e663c03"}, + {file = "pydantic_core-2.18.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18b2dc6a2a027828377175613cfb3f69c40b347084886c2ca5bb1b713c3c0c1f"}, + {file = "pydantic_core-2.18.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8e179a23237f30452776ab3fd094fd0005f45615ab826c0bb077f5657f0e84db"}, + {file = "pydantic_core-2.18.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:569a28b5f7c5a1c9b9eea5b41f18f3e0235ec25212c4b7fa98add07e3b4fce2a"}, + {file = "pydantic_core-2.18.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ca09d373225203c5849200019d7bb8fc50d4f466e9d10d67205c3e2da1221df6"}, + {file = 
"pydantic_core-2.18.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:adf5935716452b77a64e51f1344c34aab8c2e956ba32da9d038dc7f73e2262c7"}, + {file = "pydantic_core-2.18.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2f5570b47048ad0421411e9bdf1b96eee8816aeaeea7c8db78877ecd9339685f"}, + {file = "pydantic_core-2.18.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a598c0786327db680ac04f8e5125cd4a729528713c09bb8fd0c40e66750bc89f"}, + {file = "pydantic_core-2.18.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cb7ee00b493cd28998d6cd6089e994f0cc08fed5113f5dd09b8bb8c27b5dc55c"}, + {file = "pydantic_core-2.18.0-cp311-none-win32.whl", hash = "sha256:7676ec76faab6c4dbc7fdaf644f70af27ccf1868c7157da352fb55206a35e4d3"}, + {file = "pydantic_core-2.18.0-cp311-none-win_amd64.whl", hash = "sha256:c21fe62521eaf617fbb04b0fcf9af085e8dc7ea3a3ee22da3af671475f29aed1"}, + {file = "pydantic_core-2.18.0-cp311-none-win_arm64.whl", hash = "sha256:c5ee382586174d3639092b32a1a7ba4cfdadd67b2539814ddc42542d6e622dd0"}, + {file = "pydantic_core-2.18.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:c141b49350139f94a94d9268b82c0e7f91b05f1f479b785de1a5499460e68864"}, + {file = "pydantic_core-2.18.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:68e0ee55f7d521a8e7556217219112a1e9bc55b4484c8959c24e2e1a0da874d9"}, + {file = "pydantic_core-2.18.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88adc4fc547c8f8b0025974c27fd4671ec2f7ee375859a1c88313a8a63b4615e"}, + {file = "pydantic_core-2.18.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6399de345097e76a3d7420a25411939fb72fcc51890847c8b8599a43fd0b7439"}, + {file = "pydantic_core-2.18.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:83def986dea51011b9bad66b7481aabff5863cd05bd17cab4f228378d918292b"}, + {file = "pydantic_core-2.18.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:2ebe41d751e3347b5d5880498a965bd6523285ce5e7907d70de33c221dc347a4"}, + {file = "pydantic_core-2.18.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3fc20e48c936e1453e2797b28044f4cd3004c98296294b4aac31170ff44b8496"}, + {file = "pydantic_core-2.18.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:68827e0dc97c047e527dd6b86f5b4b1605faefa7a18d8f227d8f6754a6747f63"}, + {file = "pydantic_core-2.18.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d0bc6036cea7f7ba419ce1b8f2e0f8e27eddcde626fcad507edb5b7519073006"}, + {file = "pydantic_core-2.18.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c96ec95751deb156d036b348e1eef758e82326989d7e2e9fc9479d1f30b90da3"}, + {file = "pydantic_core-2.18.0-cp312-none-win32.whl", hash = "sha256:f527522a0e5470e04c75cc2f3bb272f6940acc9e426a38a6ec60ae708c1f6d58"}, + {file = "pydantic_core-2.18.0-cp312-none-win_amd64.whl", hash = "sha256:6ef640a492dad6fbe289eb91a88d7f67d6ca984db556ee1a3891a5fff4a412d2"}, + {file = "pydantic_core-2.18.0-cp312-none-win_arm64.whl", hash = "sha256:362f29ffcf78b20d2507bd39c348233a33cb0c9d70bbb26e85fc521690683e2c"}, + {file = "pydantic_core-2.18.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:f1264b478a8e5283db4eea8344d53dc608dac862ea74b1f81d1edcd785451702"}, + {file = "pydantic_core-2.18.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4368aaa4d68acf489b67a7ecb0d6f8a0c478a4491e4eb8c2b9f352800322ed32"}, + {file = "pydantic_core-2.18.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aabce6144cc2cd43e2363b463f6ba2979c7b77bad7e3ac732fc69b19e097ffcd"}, + {file = "pydantic_core-2.18.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:765b970000068ce5b42c7ffab0bcc86fd8ce141a9e3910c6f9b1bcdea158b233"}, + {file = "pydantic_core-2.18.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7d0c402862402430378e72927763c5f71554db494006d32f15d48d80dca25ef1"}, + {file = 
"pydantic_core-2.18.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd2b5c7eff2e1e4d97a5d7f2e399301e774d10f883fd355689f5e225c2283c42"}, + {file = "pydantic_core-2.18.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b39578677908dca286c7a6565512f0321dd4591a9bd013c34c3e3004316a814"}, + {file = "pydantic_core-2.18.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:15c91b087938e1917e6f66c82928808312df403f869affb48a6d1fb9aca948c2"}, + {file = "pydantic_core-2.18.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:702ddde99e36cc25d674ae3bdd21aeab0460e7bdf3f587057db2240485e48366"}, + {file = "pydantic_core-2.18.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:26b9f7654f7d076e35f51f369b885efe877313d9d9fab3d6d291ad3ea25e10dd"}, + {file = "pydantic_core-2.18.0-cp38-none-win32.whl", hash = "sha256:d714d80d505db509781e686b1ec6ae0f0f4d0ce5ee3a91a75a41d4da2592276f"}, + {file = "pydantic_core-2.18.0-cp38-none-win_amd64.whl", hash = "sha256:7f4e623d413d78dac0e66f6aff68d6ea43993acd954fbb1840fffebf0ef3e90a"}, + {file = "pydantic_core-2.18.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:347efc12f055c44383d8b41e7ee72a6189156d9bfaa2952c349856432b3cae91"}, + {file = "pydantic_core-2.18.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d432e0c0177ae5b64f3c302b7a9a62b36b9abe3210d078540fd633d90144375b"}, + {file = "pydantic_core-2.18.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:86e05c39ed6862d6864771f57d29e31ace0e91c3b8971bf5d53b2ed9156a025e"}, + {file = "pydantic_core-2.18.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4dd1aa6c7f3bea171d237a70abc105e3cda903c4ba95aea82bec11e59d45833e"}, + {file = "pydantic_core-2.18.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1cfb9b1879cbf5a87b1b3be76ae312866b96adbc6b5c55c5e9a3934f1c0d242f"}, + {file = "pydantic_core-2.18.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", 
hash = "sha256:aff8b042ce90ec873d7dd97302cadeac9768c0e536cf2452ee34e1c50a9e466d"}, + {file = "pydantic_core-2.18.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d0a30b40b76306b58e951e2eaaafdd94292df188efe33c72fd1f503a1ea375a"}, + {file = "pydantic_core-2.18.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e1065cb8c9c14ea6a8c76c7c113b4d8173be2dca984c5a3ab0d6ce364ea8b502"}, + {file = "pydantic_core-2.18.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b2772b00d0d1a0e2bfe463526f23637dbc8c7fa3c80c43bca66fe4312406412a"}, + {file = "pydantic_core-2.18.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ef52699c236366c4b18b485e9eecc3e5f215ef89b08e3e02a3a16a5abc97a69c"}, + {file = "pydantic_core-2.18.0-cp39-none-win32.whl", hash = "sha256:68b0ea179fc4ca681c651f272a9d0d42ad2a6e352f3d431c3cfba490719e40a0"}, + {file = "pydantic_core-2.18.0-cp39-none-win_amd64.whl", hash = "sha256:25b94e99e7aee8760c62a22e1dae2946318d2c44bdeb9be5f23ae1433cd6ba0f"}, + {file = "pydantic_core-2.18.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2269c1c4ab33b1cf091da878fbb739d00027649394c6c4e95a10faf5efec12b5"}, + {file = "pydantic_core-2.18.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:59f6a4444daed0265773ad6fed1495e212bb3b8e1157957b67505aa772645674"}, + {file = "pydantic_core-2.18.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ceb5a387c50d751dd25e677b5928b57ba69ee4151657369e3ead1664e12a02a"}, + {file = "pydantic_core-2.18.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9d9a17bdcf50312d3775bb60fe3c2f4b0fd5443b2705af58e491466fde291e3"}, + {file = "pydantic_core-2.18.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3a91c26005f4950d09380c82fe12b7014ca56dbc4d32e4f5a3ca5d8879d68170"}, + {file = "pydantic_core-2.18.0-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = 
"sha256:057cb50ccdcbeef19e517cfa4ac8be8b3220dcee153770bb52d266c219e1c3d3"}, + {file = "pydantic_core-2.18.0-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:70696bf40bb588f5d62b0e79fde72d432e909551c3f2f3bfcb1674d7cacc7007"}, + {file = "pydantic_core-2.18.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:8a6d93401b503a54a4ce5ddc9ccd6f5b89b271b1fe0c72fc4428443b2451d765"}, + {file = "pydantic_core-2.18.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b496cab9ac75c8e7bda7d17e8a2d0db2f610dcced5ef465ef19122a17245b0f8"}, + {file = "pydantic_core-2.18.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:6d5c13ee3a9052f4ca8e7dd65dac9749c503dd96974ed1f908e0b933b9c689be"}, + {file = "pydantic_core-2.18.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0d86a800371db0590804881346b8610bd62c5f5396d544da5ae814a863a9e1b"}, + {file = "pydantic_core-2.18.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6598ed75a1ac49784a042af54cf2db3febfa2642717b12abaf6745339f69b5d7"}, + {file = "pydantic_core-2.18.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8ccf3e031d2dadf999d78d543d9ec9ce9fef40ae8a3c3a5a35041709d734d0d2"}, + {file = "pydantic_core-2.18.0-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:324018576490157103965281df89d287cbf18415fb3fcbb0a66efa23f2b5a497"}, + {file = "pydantic_core-2.18.0-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:5b109e4a7828b7cd5fa7bb63c6125203711298d0b1f1b83d0f9786c7ce3d689b"}, + {file = "pydantic_core-2.18.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:b770ae0d064a2d858f68c933217e01ea372de25685a52b4e98b26ea5684811c0"}, + {file = "pydantic_core-2.18.0.tar.gz", hash = "sha256:a6d075404af8b8feb42f86196e08053bfae282af2701321f36a1553e966ce1f0"}, ] [package.dependencies] @@ -760,24 +761,24 @@ windows-terminal = ["colorama (>=0.4.6)"] [[package]] name = "pystac" -version = "1.9.0" +version = "1.10.0" description = 
"Python library for working with the SpatioTemporal Asset Catalog (STAC) specification" optional = false python-versions = ">=3.9" files = [ - {file = "pystac-1.9.0-py3-none-any.whl", hash = "sha256:64d5654166290169ad6ad2bc0d5337a1664ede1165635f0b73b327065b801a2f"}, - {file = "pystac-1.9.0.tar.gz", hash = "sha256:c6b5a86e241fca5e9267a7902c26679f208749a107e9015fe6aaf73a9dd40948"}, + {file = "pystac-1.10.0-py3-none-any.whl", hash = "sha256:2d1eb969abc7e13e2bdb4bb5ae1a68780da1e06f30f66fcf0d4143f51eb03f38"}, + {file = "pystac-1.10.0.tar.gz", hash = "sha256:e2762a700953ae9bab914137116cea31e08378f6c7024d805d651009a6341e20"}, ] [package.dependencies] python-dateutil = ">=2.7.0" [package.extras] -bench = ["asv (>=0.6.0,<0.7.0)", "packaging (>=23.1,<24.0)", "virtualenv (>=20.22,<21.0)"] -docs = ["Sphinx (>=6.2,<7.0)", "boto3 (>=1.28,<2.0)", "ipython (>=8.12,<9.0)", "jinja2 (<4.0)", "jupyter (>=1.0,<2.0)", "nbsphinx (>=0.9.0,<0.10.0)", "pydata-sphinx-theme (>=0.13,<1.0)", "rasterio (>=1.3,<2.0)", "shapely (>=2.0,<3.0)", "sphinx-autobuild (==2021.3.14)", "sphinx-design (>=0.5.0,<0.6.0)", "sphinxcontrib-fulltoc (>=1.2,<2.0)"] +bench = ["asv (>=0.6.0,<0.7.0)", "packaging (>=24.0,<25.0)", "virtualenv (>=20.22,<21.0)"] +docs = ["Sphinx (>=6.2,<7.0)", "boto3 (>=1.28,<2.0)", "ipython (>=8.12,<9.0)", "jinja2 (<4.0)", "jupyter (>=1.0,<2.0)", "nbsphinx (>=0.9.0,<0.10.0)", "pydata-sphinx-theme (>=0.13,<1.0)", "rasterio (>=1.3,<2.0)", "shapely (>=2.0,<3.0)", "sphinx-autobuild (==2024.2.4)", "sphinx-design (>=0.5.0,<0.6.0)", "sphinxcontrib-fulltoc (>=1.2,<2.0)"] jinja2 = ["jinja2 (<4.0)"] orjson = ["orjson (>=3.5)"] -test = ["black (>=23.3,<24.0)", "codespell (>=2.2,<3.0)", "coverage (>=7.2,<8.0)", "doc8 (>=1.1,<2.0)", "html5lib (>=1.1,<2.0)", "jinja2 (<4.0)", "jsonschema (>=4.18,<5.0)", "mypy (>=1.2,<2.0)", "orjson (>=3.8,<4.0)", "pre-commit (>=3.2,<4.0)", "pytest (>=7.3,<8.0)", "pytest-cov (>=4.0,<5.0)", "pytest-mock (>=3.10,<4.0)", "pytest-recording (>=0.13.0,<0.14.0)", "requests-mock 
(>=1.11,<2.0)", "ruff (==0.1.1)", "types-html5lib (>=1.1,<2.0)", "types-jsonschema (>=4.18,<5.0)", "types-orjson (>=3.6,<4.0)", "types-python-dateutil (>=2.8,<3.0)", "types-urllib3 (>=1.26,<2.0)"] +test = ["black (>=24.0,<25.0)", "codespell (>=2.2,<3.0)", "coverage (>=7.2,<8.0)", "doc8 (>=1.1,<2.0)", "html5lib (>=1.1,<2.0)", "jinja2 (<4.0)", "jsonschema (>=4.18,<5.0)", "mypy (>=1.2,<2.0)", "orjson (>=3.8,<4.0)", "pre-commit (>=3.2,<4.0)", "pytest (>=8.0,<9.0)", "pytest-cov (>=5.0,<6.0)", "pytest-mock (>=3.10,<4.0)", "pytest-recording (>=0.13.0,<0.14.0)", "requests-mock (>=1.11,<2.0)", "ruff (==0.3.4)", "types-html5lib (>=1.1,<2.0)", "types-jsonschema (>=4.18,<5.0)", "types-orjson (>=3.6,<4.0)", "types-python-dateutil (>=2.8,<3.0)", "types-urllib3 (>=1.26,<2.0)"] urllib3 = ["urllib3 (>=1.26)"] validation = ["jsonschema (>=4.18,<5.0)"] @@ -891,17 +892,17 @@ test = ["black (>=22.1.0)", "flake8 (>=4.0.1)", "pre-commit (>=2.17.0)", "tox (> [[package]] name = "pytest-mock" -version = "3.12.0" +version = "3.14.0" description = "Thin-wrapper around the mock package for easier use with pytest" optional = false python-versions = ">=3.8" files = [ - {file = "pytest-mock-3.12.0.tar.gz", hash = "sha256:31a40f038c22cad32287bb43932054451ff5583ff094bca6f675df2f8bc1a6e9"}, - {file = "pytest_mock-3.12.0-py3-none-any.whl", hash = "sha256:0972719a7263072da3a21c7f4773069bcc7486027d7e8e1f81d98a47e701bc4f"}, + {file = "pytest-mock-3.14.0.tar.gz", hash = "sha256:2719255a1efeceadbc056d6bf3df3d1c5015530fb40cf347c0f9afac88410bd0"}, + {file = "pytest_mock-3.14.0-py3-none-any.whl", hash = "sha256:0b72c38033392a5f4621342fe11e9219ac11ec9d375f8e2a0c164539e0d70f6f"}, ] [package.dependencies] -pytest = ">=5.0" +pytest = ">=6.2.5" [package.extras] dev = ["pre-commit", "pytest-asyncio", "tox"] @@ -992,6 +993,7 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, {file = 
"PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, @@ -1193,18 +1195,18 @@ gitlab = ["python-gitlab (>=1.3.0)"] [[package]] name = "setuptools" -version = "69.1.1" +version = "69.2.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "setuptools-69.1.1-py3-none-any.whl", hash = "sha256:02fa291a0471b3a18b2b2481ed902af520c69e8ae0919c13da936542754b4c56"}, - {file = "setuptools-69.1.1.tar.gz", hash = "sha256:5c0806c7d9af348e6dd3777b4f4dbb42c7ad85b190104837488eab9a7c945cf8"}, + {file = "setuptools-69.2.0-py3-none-any.whl", hash = "sha256:c21c49fb1042386df081cb5d86759792ab89efca84cf114889191cd09aacc80c"}, + {file = "setuptools-69.2.0.tar.gz", hash = "sha256:0ff4183f8f42cd8fa3acea16c45205521a4ef28f73c6391d8a25e92893134f2e"}, ] [package.extras] docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] -testing = 
["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.2)", "pip (>=19.1)", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff (>=0.2.1)", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +testing = ["build[virtualenv]", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "mypy (==1.9)", "packaging (>=23.2)", "pip (>=19.1)", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff (>=0.2.1)", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.2)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] [[package]] @@ -1338,13 +1340,13 @@ files = [ [[package]] name = "typer" -version = "0.9.0" +version = "0.9.4" description = "Typer, build great CLIs. Easy to code. Based on Python type hints." 
optional = false python-versions = ">=3.6" files = [ - {file = "typer-0.9.0-py3-none-any.whl", hash = "sha256:5d96d986a21493606a358cae4461bd8cdf83cbf33a5aa950ae629ca3b51467ee"}, - {file = "typer-0.9.0.tar.gz", hash = "sha256:50922fd79aea2f4751a8e0408ff10d2662bd0c8bbfa84755a699f3bada2978b2"}, + {file = "typer-0.9.4-py3-none-any.whl", hash = "sha256:aa6c4a4e2329d868b80ecbaf16f807f2b54e192209d7ac9dd42691d63f7a54eb"}, + {file = "typer-0.9.4.tar.gz", hash = "sha256:f714c2d90afae3a7929fcd72a3abb08df305e1ff61719381384211c4070af57f"}, ] [package.dependencies] @@ -1358,7 +1360,7 @@ typing-extensions = ">=3.7.4.3" all = ["colorama (>=0.4.3,<0.5.0)", "rich (>=10.11.0,<14.0.0)", "shellingham (>=1.3.0,<2.0.0)"] dev = ["autoflake (>=1.3.1,<2.0.0)", "flake8 (>=3.8.3,<4.0.0)", "pre-commit (>=2.17.0,<3.0.0)"] doc = ["cairosvg (>=2.5.2,<3.0.0)", "mdx-include (>=1.4.1,<2.0.0)", "mkdocs (>=1.1.2,<2.0.0)", "mkdocs-material (>=8.1.4,<9.0.0)", "pillow (>=9.3.0,<10.0.0)"] -test = ["black (>=22.3.0,<23.0.0)", "coverage (>=6.2,<7.0)", "isort (>=5.0.6,<6.0.0)", "mypy (==0.910)", "pytest (>=4.4.0,<8.0.0)", "pytest-cov (>=2.10.0,<5.0.0)", "pytest-sugar (>=0.9.4,<0.10.0)", "pytest-xdist (>=1.32.0,<4.0.0)", "rich (>=10.11.0,<14.0.0)", "shellingham (>=1.3.0,<2.0.0)"] +test = ["black (>=22.3.0,<23.0.0)", "coverage (>=6.2,<7.0)", "isort (>=5.0.6,<6.0.0)", "mypy (==0.971)", "pytest (>=4.4.0,<8.0.0)", "pytest-cov (>=2.10.0,<5.0.0)", "pytest-sugar (>=0.9.4,<0.10.0)", "pytest-xdist (>=1.32.0,<4.0.0)", "rich (>=10.11.0,<14.0.0)", "shellingham (>=1.3.0,<2.0.0)"] [[package]] name = "typing-extensions" @@ -1411,4 +1413,4 @@ test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "9437634706b27f73a8577b43f479ed8698df60d23b2779ce92b44dfdb531acbd" +content-hash = "1c9995bdb53dc27d8bd473ea4f6bdbcf1461a543ac03014ccae08bebb1462d8c" diff --git a/pyproject.toml b/pyproject.toml index 33d2cf3..07c79b5 100644 --- 
a/pyproject.toml +++ b/pyproject.toml @@ -66,7 +66,7 @@ mypy-extensions = "^0.4.3" pre-commit = "^2.21.0" bandit = "^1.7.5" safety = "^2.3.4" - +pystac = "^1.10.0" # custom validator required (https://github.com/stac-utils/pystac/pull/1320) pydocstyle = {extras = ["toml"], version = "^6.2.0"} pydoclint = "^0.3.0" diff --git a/stac_model/examples.py b/stac_model/examples.py index 106b76f..8e0ede4 100644 --- a/stac_model/examples.py +++ b/stac_model/examples.py @@ -15,7 +15,7 @@ ) -def eurosat_resnet(): +def eurosat_resnet() -> MLModelExtension[pystac.Item]: input_array = InputArray( shape=[-1, 13, 64, 64], dim_order="bchw", data_type="float32" ) @@ -91,7 +91,7 @@ def eurosat_resnet(): commit_hash="61efd2e2c4df7ebe3bd03002ebbaeaa3cfe9885a", ) result_array = ResultArray( - shape=[-1, 10], dim_names=["batch", "class"], data_type="float32" + shape=[-1, 10], dim_order=["batch", "class"], data_type="float32" ) class_map = { "Annual Crop": 0, @@ -158,6 +158,6 @@ def eurosat_resnet(): item.add_derived_from( "https://earth-search.aws.element84.com/v1/collections/sentinel-2-l2a" ) - item_mlmodel = MLModelExtension.ext(item, add_if_missing=True) - item_mlmodel.apply(ml_model_meta.model_dump()) - return item_mlmodel + item_mlm = MLModelExtension.ext(item, add_if_missing=True) + item_mlm.apply(ml_model_meta.model_dump()) + return item_mlm diff --git a/stac_model/input.py b/stac_model/input.py index 5e315c2..c453dbb 100644 --- a/stac_model/input.py +++ b/stac_model/input.py @@ -5,7 +5,7 @@ class InputArray(BaseModel): shape: List[Union[int, float]] - dim_order: Literal["bhw", "bchw", "bthw", "btchw"] + dim_order: List[str] data_type: str = Field( ..., pattern="^(uint8|uint16|uint32|uint64|int8|int16|int32|int64|float16|float32|float64|cint16|cint32|cfloat32|cfloat64|other)$", diff --git a/stac_model/output.py b/stac_model/output.py index 11a7b40..0b2e919 100644 --- a/stac_model/output.py +++ b/stac_model/output.py @@ -20,12 +20,13 @@ class TaskEnum(str, Enum): class 
ResultArray(BaseModel): shape: List[Union[int, float]] - dim_names: List[str] + dim_order: List[str] data_type: str = Field( ..., pattern="^(uint8|uint16|uint32|uint64|int8|int16|int32|int64|float16|float32|float64)$", ) + class ClassObject(BaseModel): value: int name: str @@ -34,6 +35,7 @@ class ClassObject(BaseModel): color_hint: Optional[str] = None nodata: Optional[bool] = False + class ModelOutput(BaseModel): task: TaskEnum result_array: Optional[List[ResultArray]] = None diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..b33cce0 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,50 @@ +import json +import os +from typing import Any, Dict, cast + +import pystac +import pytest + +from stac_model.examples import eurosat_resnet as make_eurosat_resnet +from stac_model.schema import SCHEMA_URI + +TEST_DIR = os.path.dirname(__file__) +EXAMPLES_DIR = os.path.abspath(os.path.join(TEST_DIR, "../examples")) +JSON_SCHEMA_DIR = os.path.abspath(os.path.join(TEST_DIR, "../json-schema")) + + +@pytest.fixture(scope="session") +def mlm_schema() -> Dict[str, Any]: + with open(os.path.join(JSON_SCHEMA_DIR, "schema.json")) as schema_file: + return json.load(schema_file) + + +@pytest.fixture(scope="session", autouse=True) +def mlm_validator( + request: pytest.FixtureRequest, + mlm_schema: Dict[str, Any], +) -> pystac.validation.stac_validator.JsonSchemaSTACValidator: + """ + Update the :class:`pystac.validation.RegisteredValidator` with the local ML-AOI JSON schema definition. + + Because the schema is *not yet* uploaded to the expected STAC schema URI, + any call to :func:`pystac.validation.validate` or :meth:`pystac.stac_object.STACObject.validate` results + in ``GetSchemaError`` when the schema retrieval is attempted by the validator. By adding the schema to the + mapping beforehand, remote resolution can be bypassed temporarily. 
+ """ + validator = pystac.validation.RegisteredValidator.get_validator() + validator = cast(pystac.validation.stac_validator.JsonSchemaSTACValidator, validator) + validator.schema_cache[SCHEMA_URI] = mlm_schema + pystac.validation.RegisteredValidator.set_validator(validator) # apply globally to allow 'STACObject.validate()' + return validator + + +@pytest.fixture(scope="session", autouse=True) +def mlm_example() -> Dict[str, Any]: + with open(os.path.join(EXAMPLES_DIR, "example.json")) as example_file: + return json.load(example_file) + + +@pytest.fixture(name="eurosat_resnet") +def eurosat_resnet(): + return make_eurosat_resnet() diff --git a/tests/test_schema.py b/tests/test_schema.py index 45d6e36..a25f69d 100644 --- a/tests/test_schema.py +++ b/tests/test_schema.py @@ -1,18 +1,15 @@ -import pytest +import pystac -@pytest.fixture -def mlmodel_metadata_item(): - from stac_model.examples import eurosat_resnet +def test_mlm_schema(mlm_validator, mlm_example): + mlm_item = pystac.Item.from_dict(mlm_example) + invalid = pystac.validation.validate(mlm_item, validator=mlm_validator) + assert not invalid - model_metadata_stac_item = eurosat_resnet() - return model_metadata_stac_item +def test_model_metadata_to_dict(eurosat_resnet): + assert eurosat_resnet.item.to_dict() -def test_model_metadata_to_dict(mlmodel_metadata_item): - assert mlmodel_metadata_item.item.to_dict() - -def test_validate_model_metadata(mlmodel_metadata_item): - import pystac - assert pystac.read_dict(mlmodel_metadata_item.item.to_dict()) +def test_validate_model_metadata(eurosat_resnet): + assert pystac.read_dict(eurosat_resnet.item.to_dict()) From ab4176519f9a48df1ac4bb125b0d2a417d83e1c0 Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Thu, 4 Apr 2024 13:15:38 -0400 Subject: [PATCH 080/112] more details about expected values for dim_order + pretrained flag --- README.md | 61 ++++++++++++++++++++++++++++++++++++++----------------- 1 file changed, 42 insertions(+), 19 deletions(-) 
diff --git a/README.md b/README.md index 929a66a..f30aefe 100644 --- a/README.md +++ b/README.md @@ -72,7 +72,8 @@ The fields in the table below can be used in these parts of STAC documents: | mlm:framework_version | string | The `framework` library version. Some models require a specific version of the machine learning `framework` to run. | | mlm:memory_size | integer | The in-memory size of the model on the accelerator during inference (bytes). | | mlm:total_parameters | integer | Total number of model parameters, including trainable and non-trainable parameters. | -| mlm:pretrained_source | string \| null | The source of the pretraining. Can refer to popular pretraining datasets by name (i.e. Imagenet) or less known datasets by URL and description. If trained from scratch, the `null` value should be set explicitly. | +| mlm:pretrained | boolean | Indicates if the model was pretrained. If the model was pretrained, consider providing `pretrained_source` if it is known. | +| mlm:pretrained_source | string \| null | The source of the pretraining. Can refer to popular pretraining datasets by name (i.e. Imagenet) or less known datasets by URL and description. If trained from scratch (i.e.: `pretrained = false`), the `null` value should be set explicitly. | | mlm:batch_size_suggestion | integer | A suggested batch size for the accelerator and summarized hardware. | | mlm:accelerator | [Accelerator Enum](#accelerator-enum) \| null | The intended computational hardware that runs inference. If undefined or set to `null` explicitly, the model does not require any specific accelerator. | | mlm:accelerator_constrained | boolean | Indicates if the intended `accelerator` is the only `accelerator` that can run inference. If undefined, it should be assumed `false`. | @@ -200,7 +201,7 @@ set to `true`, there would be no `accelerator` to contain against. 
To avoid conf | Field Name | Type | Description | |-------------------------|---------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| name | string | **REQUIRED** Name of the input variable defined by the model. If no explicit name is defined by the model, an informative name (e.g.: "RGB Time Series") can be used instead. | +| name | string | **REQUIRED** Name of the input variable defined by the model. If no explicit name is defined by the model, an informative name (e.g.: `"RGB Time Series"`) can be used instead. | | bands | \[string] | **REQUIRED** The names of the raster bands used to train or fine-tune the model, which may be all or a subset of bands available in a STAC Item's [Band Object](#bands-and-statistics). If no band applies for one input, use an empty array. | | input | [Input Structure Object](#input-structure-object) | **REQUIRED** The N-dimensional array definition that describes the shape, dimension ordering, and data type. | | norm_by_channel | boolean | Whether to normalize each channel by channel-wise statistics or to normalize by dataset statistics. If True, use an array of `statistics` of same dimensionality and order as the `bands` field in this object. | @@ -255,16 +256,37 @@ Both definitions should define equivalent values. #### Input Structure Object -| Field Name | Type | Description | -|------------|-----------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| shape | [integer] | **REQUIRED** Shape of the input n-dimensional array ($N \times C \times H \times W$), including the batch size dimension. 
Each dimension must either be greater than 0 or -1 to indicate a variable dimension size. | -| dim_order | string | **REQUIRED** How the above dimensions are ordered within the `shape`. `bhw`, `bchw`, `bthw`, `btchw` are valid orderings where `b`=batch, `c`=channel, `t`=time, `h`=height, `w`=width. | -| data_type | [Data Type Enum](#data-type-enum) | **REQUIRED** The data type of values in the n-dimensional array. For model inputs, this should be the data type of the processed input supplied to the model inference function, not the data type of the source bands. | +| Field Name | Type | Description | +|------------|----------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| shape | \[integer] | **REQUIRED** Shape of the input n-dimensional array (e.g.: $B \times C \times H \times W$), including the batch size dimension. Each dimension must either be greater than 0 or -1 to indicate a variable dimension size. | +| dim_order | \[[Dimension Order](#dimension-order)] | **REQUIRED** Order of the `shape` dimensions by name. | +| data_type | [Data Type Enum](#data-type-enum) | **REQUIRED** The data type of values in the n-dimensional array. For model inputs, this should be the data type of the processed input supplied to the model inference function, not the data type of the source bands. | A common use of `-1` for one dimension of `shape` is to indicate a variable batch-size. However, this value is not strictly reserved for the `b` dimension. For example, if the model is capable of automatically adjusting its input layer to adapt to the provided input data, -then the corresponding dimensions that can be adapted can employ `-1` as well. +then the corresponding dimensions that can be adapted can employ `-1` as well. 
+ +#### Dimension Order + +Recommended values should use common names as much as possible to allow better interpretation by users and scripts +that could need to resolve the dimension ordering for reshaping requirements according to the ML framework employed. + +Below are some notable common names recommended for use, but others can be employed as needed. + +- `batch` +- `channel` +- `time` +- `height` +- `width` +- `depth` +- `token` +- `class` +- `score` +- `confidence` + +For example, a tensor of multiple RBG images represented as $B \times C \times H \times W$ should +indicate `dim_order = ["batch", "channel", "height", "width"]`. #### Normalize Enum @@ -342,12 +364,13 @@ the following formats are recommended as alternative scripts and function refere ### Model Output Object -| Field Name | Type | Description | -|--------------------------|-----------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| tasks | \[[Task Enum](#task-enum)] | **REQUIRED** Specifies the Machine Learning tasks for which the output can be used for. This can be a subset of `mlm:tasks` defined under the Item `properties` as applicable. | -| result | [Result Structure Object](#result-structure-object) | The structure that describes the resulting output arrays/tensors from one model head. | -| classification:classes | \[[Class Object](#class-object)] | A list of class objects adhering to the [Classification Extension](https://github.com/stac-extensions/classification). | -| post_processing_function | [Processing Expression](#processing-expression) \| null | Custom postprocessing function where normalization and rescaling, and any other significant operations takes place. 
| +| Field Name | Type | Description | +|--------------------------|---------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| name | string | **REQUIRED** Name of the output variable defined by the model. If no explicit name is defined by the model, an informative name (e.g.: `"CLASSIFICATION"`) can be used instead. | +| tasks | \[[Task Enum](#task-enum)] | **REQUIRED** Specifies the Machine Learning tasks for which the output can be used for. This can be a subset of `mlm:tasks` defined under the Item `properties` as applicable. | +| result | [Result Structure Object](#result-structure-object) | **REQUIRED** The structure that describes the resulting output arrays/tensors from one model head. | +| classification:classes | \[[Class Object](#class-object)] | A list of class objects adhering to the [Classification Extension](https://github.com/stac-extensions/classification). | +| post_processing_function | [Processing Expression](#processing-expression) \| null | Custom postprocessing function where normalization and rescaling, and any other significant operations takes place. | While only `tasks` is a required field, all fields are recommended for tasks that produce a fixed shape tensor and have output classes. 
Outputs that have variable dimensions, can define the `result` with the @@ -357,11 +380,11 @@ as for `regression`, `image-captioning`, `super-resolution` and some `generative #### Result Structure Object -| Field Name | Type | Description | -|------------|-----------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| shape | \[integer] | **REQUIRED** Shape of the n-dimensional result array ($N \times H \times W$), possibly including a batch size dimension. The batch size dimension must either be greater than 0 or -1 to indicate an unspecified batch dimension size. | -| dim_names | \[string] | **REQUIRED** The names of the above dimensions of the result array, ordered the same as this object's `shape` field. | -| data_type | [Data Type Enum](#data-type-enum) | **REQUIRED** The data type of values in the n-dimensional array. For model outputs, this should be the data type of the result of the model inference without extra post processing. | +| Field Name | Type | Description | +|------------|----------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| shape | \[integer] | **REQUIRED** Shape of the n-dimensional result array (e.g.: $B \times H \times W$ or $B \times C$), possibly including a batch size dimension. The dimensions must either be greater than 0 or -1 to indicate a variable size. | +| dim_order | \[[Dimension Order](#dimension-order)] | **REQUIRED** Order of the `shape` dimensions by name for the result array. | +| data_type | [Data Type Enum](#data-type-enum) | **REQUIRED** The data type of values in the n-dimensional array. 
For model outputs, this should be the data type of the result of the model inference without extra post processing. | #### Class Object From be58e86dbdf50214767d8bc507e3f887315377c4 Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Thu, 4 Apr 2024 13:16:08 -0400 Subject: [PATCH 081/112] address incompatibility of 'end_datetime=null' with STAC Core (relates to https://github.com/radiantearth/stac-spec/issues/1268) --- best-practices.md | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/best-practices.md b/best-practices.md index 9c55c4f..578127b 100644 --- a/best-practices.md +++ b/best-practices.md @@ -37,12 +37,23 @@ choose to apply it for contexts outside the *recommended* extent for the same re As another example, let us consider a model which is trained on imagery from all over the world and is robust enough to be applied to any time period. In this case, the common metadata to use with the model -could include the bbox of "the world" `[-90, -180, 90, 180]` and the `start_datetime` and `end_datetime` range could -be generic values like `["1900-01-01", null]`. However, it is to be noted that generic and very broad spatiotemporal -extents like these rarely reflect the reality regarding the capabilities and precision of the model to predict reliable +could include the bbox of "the world" `[-90, -180, 90, 180]` and the `start_datetime` and `end_datetime` range +would ideally be generic values like `["1900-01-01", null]` (see warning below). +However, due to limitations with the STAC 1.0 specification, this time extent is not applicable. + +> [!WARNING] +> The `null` value is not allowed for datetime specification. +> As a workaround, the `end_datetime` can be set with a "very large value" +> (similarly to `start_datetime` set with a small value), such as `"9999-12-31T23:59:59Z"`. +> Alternatively, the model can instead be described with only `datetime` corresponding to its publication date. +>

+> For more details, see the following [discussion](https://github.com/radiantearth/stac-spec/issues/1268). + +It is to be noted that generic and very broad spatiotemporal +extents like above rarely reflect the reality regarding the capabilities and precision of the model to predict reliable results. If a more restrained area and time of interest can be identified, such as the ranges for which the training dataset applies, or a test split dataset that validates the applicability of the model on other domains, those should -be provided instead. +be provided instead. Nevertheless, users of the model are still free to apply it outside the specified extents. If specific datasets with training/validation/test splits are known to support the claims of the suggested extent for the model, it is recommended that they are included as reference to the STAC Item/Collection using MLM. For more From 8b46388f0d666f4cb08863f02086199c192b768e Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Thu, 4 Apr 2024 13:39:22 -0400 Subject: [PATCH 082/112] add mlm:hyperparameters defintion (fixes https://github.com/crim-ca/dlm-extension/issues/14) --- README.md | 22 ++++++++++++++++++++++ json-schema/schema.json | 11 +++++++++++ 2 files changed, 33 insertions(+) diff --git a/README.md b/README.md index f30aefe..50087dd 100644 --- a/README.md +++ b/README.md @@ -81,6 +81,7 @@ The fields in the table below can be used in these parts of STAC documents: | mlm:accelerator_count | integer | A minimum amount of `accelerator` instances required to run the model. | | mlm:input | \[[Model Input Object](#model-input-object)] | **REQUIRED** Describes the transformation between the EO data and the model input. | | mlm:output | \[[Model Output Object](#model-output-object)] | **REQUIRED** Describes each model output and how to interpret it. | +| mlm:hyperparameters | [Model Hyperparameters Object](#model-hyperparameters-object) | Additional hyperparameters relevant for the model. 
| To decide whether above fields should be applied under Item `properties` or under respective Assets, the context of each field must be considered. For example, the `mlm:name` should always be provided in the Item `properties`, since @@ -391,6 +392,27 @@ as for `regression`, `image-captioning`, `super-resolution` and some `generative See the documentation for the [Class Object](https://github.com/stac-extensions/classification?tab=readme-ov-file#class-object). +### Model Hyperparameters Object + +The hyperparameters are an open JSON object definition that can be used to provide relevant configurations for the +model. Those can combine training details, inference runtime parameters, or both. For example, training hyperparameters +could indicate the number of epochs that were used, the optimizer employed, the number of estimators contained in an +ensemble of models, or the random state value. For inference, parameters such as the model temperature, a confidence +cut-off threshold, or a non-maximum suppression threshold to limit proposal could be specified. The specific parameter +names, and how they should be employed by the model, are specific to each implementation. 
+ +Following is an example of what the hyperparameters definition could look like: + +```json +{ + "mlm:hyperparameters": { + "nms_max_detections": 500, + "nms_threshold": 0.25, + "iou_threshold": 0.5, + "random_state": 12345 + } +} +``` ## Assets Objects diff --git a/json-schema/schema.json b/json-schema/schema.json index c039238..bf56f44 100644 --- a/json-schema/schema.json +++ b/json-schema/schema.json @@ -187,6 +187,9 @@ }, "mlm:output": { "$ref": "#/$defs/mlm:output" + }, + "mlm:hyperparameters": { + "$ref": "#/$defs/mlm:hyperparameters" } }, "$comment": "Allow properties not defined by MLM prefix to allow combination with other extensions.", @@ -435,6 +438,14 @@ } } }, + "mlm:hyperparameters": { + "type": "object", + "minProperties": 1, + "patternProperties": { + "^[0-9a-zA-Z_.-]+$": true + }, + "additionalProperties": false + }, "InputStructure": { "title": "Input Structure Object", "type": "object", From 2b8729765ee7010d2e909ee5c5fe722d9e0c7fe6 Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Thu, 4 Apr 2024 14:57:37 -0400 Subject: [PATCH 083/112] add example bands and statitics details --- README.md | 5 + best-practices.md | 2 +- examples/example.json | 238 +++++++++++++++++++++++++++++++++++++--- json-schema/schema.json | 4 + 4 files changed, 235 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index 50087dd..1cbff67 100644 --- a/README.md +++ b/README.md @@ -307,6 +307,11 @@ Select one option from: See [OpenCV - Interpolation Flags](https://docs.opencv.org/4.x/da/d54/group__imgproc__transform.html#ga5bb5a1fea74ea38e1a5445ca803ff121) for details about the relevant methods. Equivalent methods from other packages are applicable as well. +When a normalization technique is specified, it is expected that the corresponding [Statistics](#bands-and-statistics) +parameters necessary to perform it would be provided for the corresponding input. 
+For example, the `min-max` normalization would require that at least the `minimum` and `maximum` statistic properties +are provided, while the `z-score` would require `mean` and `stddev`. + If none of the above values applies, `null` (literal, not string) can be used instead. If a custom normalization operation, or a combination of operations (with or without [Resize](#resize-enum)), must be defined instead, consider using a [Processing Expression](#processing-expression) reference. diff --git a/best-practices.md b/best-practices.md index 578127b..7e14d3e 100644 --- a/best-practices.md +++ b/best-practices.md @@ -38,7 +38,7 @@ choose to apply it for contexts outside the *recommended* extent for the same re As another example, let us consider a model which is trained on imagery from all over the world and is robust enough to be applied to any time period. In this case, the common metadata to use with the model could include the bbox of "the world" `[-90, -180, 90, 180]` and the `start_datetime` and `end_datetime` range -would ideally be generic values like `["1900-01-01", null]` (see warning below). +would ideally be generic values like `["1900-01-01T00:00:00Z", null]` (see warning below). However, due to limitations with the STAC 1.0 specification, this time extent is not applicable. 
> [!WARNING] diff --git a/examples/example.json b/examples/example.json index ba10bba..f37231c 100644 --- a/examples/example.json +++ b/examples/example.json @@ -1,6 +1,11 @@ { - "type": "Feature", "stac_version": "1.0.0", + "stac_extensions": [ + "https://stac-extensions.github.io/mlm/v1.0.0/schema.json", + "https://stac-extensions.github.io/eo/v1.1.0/schema.json", + "https://stac-extensions.github.io/raster/v1.1.0/schema.json" + ], + "type": "Feature", "id": "resnet-18_sentinel-2_all_moco_classification", "geometry": { "type": "Polygon", @@ -29,8 +34,15 @@ ] ] }, + "bbox": [ + -7.882190080512502, + 37.13739173208318, + 27.911651652899923, + 58.21798141355221 + ], "properties": { - "start_datetime": "1900-01-01", + "datetime": null, + "start_datetime": "1900-01-01T00:00:00Z", "end_datetime": "9999-12-31T23:59:59Z", "mlm:name": "Resnet-18 Sentinel-2 ALL MOCO", "mlm:tasks": [ @@ -230,6 +242,216 @@ ], "post_processing_function": null } + ], + "eo:bands": [ + { + "name": "coastal", + "common_name": "coastal", + "description": "Coastal aerosol (band 1)", + "center_wavelength": 0.443, + "full_width_half_max": 0.027 + }, + { + "name": "blue", + "common_name": "blue", + "description": "Blue (band 2)", + "center_wavelength": 0.49, + "full_width_half_max": 0.098 + }, + { + "name": "green", + "common_name": "green", + "description": "Green (band 3)", + "center_wavelength": 0.56, + "full_width_half_max": 0.045 + }, + { + "name": "red", + "common_name": "red", + "description": "Red (band 4)", + "center_wavelength": 0.665, + "full_width_half_max": 0.038 + }, + { + "name": "rededge1", + "common_name": "rededge", + "description": "Red edge 1 (band 5)", + "center_wavelength": 0.704, + "full_width_half_max": 0.019 + }, + { + "name": "rededge2", + "common_name": "rededge", + "description": "Red edge 2 (band 6)", + "center_wavelength": 0.74, + "full_width_half_max": 0.018 + }, + { + "name": "rededge3", + "common_name": "rededge", + "description": "Red edge 3 (band 7)", + 
"center_wavelength": 0.783, + "full_width_half_max": 0.028 + }, + { + "name": "nir", + "common_name": "nir", + "description": "NIR 1 (band 8)", + "center_wavelength": 0.842, + "full_width_half_max": 0.145 + }, + { + "name": "nir08", + "common_name": "nir08", + "description": "NIR 2 (band 8A)", + "center_wavelength": 0.865, + "full_width_half_max": 0.033 + }, + { + "name": "nir09", + "common_name": "nir09", + "description": "NIR 3 (band 9)", + "center_wavelength": 0.945, + "full_width_half_max": 0.026 + }, + { + "name": "cirrus", + "common_name": "cirrus", + "description": "SWIR - Cirrus (band 10)", + "center_wavelength": 1.375, + "full_width_half_max": 0.026 + }, + { + "name": "swir16", + "common_name": "swir16", + "description": "SWIR 1 (band 11)", + "center_wavelength": 1.61, + "full_width_half_max": 0.143 + }, + { + "name": "swir22", + "common_name": "swir22", + "description": "SWIR 2 (band 12)", + "center_wavelength": 2.19, + "full_width_half_max": 0.242 + } + ], + "raster:bands": [ + { + "name": "coastal", + "common_name": "coastal", + "description": "Coastal aerosol (band 1)", + "center_wavelength": 0.443, + "full_width_half_max": 0.027 + }, + { + "nodata": 0, + "data_type": "uint16", + "bits_per_sample": 15, + "spatial_resolution": 10, + "scale": 0.0001, + "offset": 0, + "unit": "m" + }, + { + "nodata": 0, + "data_type": "uint16", + "bits_per_sample": 15, + "spatial_resolution": 10, + "scale": 0.0001, + "offset": 0, + "unit": "m" + }, + { + "nodata": 0, + "data_type": "uint16", + "bits_per_sample": 15, + "spatial_resolution": 10, + "scale": 0.0001, + "offset": 0, + "unit": "m" + }, + { + "nodata": 0, + "data_type": "uint16", + "bits_per_sample": 15, + "spatial_resolution": 20, + "scale": 0.0001, + "offset": 0, + "unit": "m" + }, + { + "nodata": 0, + "data_type": "uint16", + "bits_per_sample": 15, + "spatial_resolution": 20, + "scale": 0.0001, + "offset": 0, + "unit": "m" + }, + { + "nodata": 0, + "data_type": "uint16", + "bits_per_sample": 15, + 
"spatial_resolution": 20, + "scale": 0.0001, + "offset": 0, + "unit": "m" + }, + { + "nodata": 0, + "data_type": "uint16", + "bits_per_sample": 15, + "spatial_resolution": 10, + "scale": 0.0001, + "offset": 0, + "unit": "m" + }, + { + "nodata": 0, + "data_type": "uint16", + "bits_per_sample": 15, + "spatial_resolution": 20, + "scale": 0.0001, + "offset": 0, + "unit": "m" + }, + { + "nodata": 0, + "data_type": "uint16", + "bits_per_sample": 15, + "spatial_resolution": 60, + "scale": 0.0001, + "offset": 0, + "unit": "m" + }, + { + "nodata": 0, + "data_type": "uint16", + "bits_per_sample": 15, + "spatial_resolution": 60, + "scale": 0.0001, + "offset": 0, + "unit": "m" + }, + { + "nodata": 0, + "data_type": "uint16", + "bits_per_sample": 15, + "spatial_resolution": 20, + "scale": 0.0001, + "offset": 0, + "unit": "m" + }, + { + "nodata": 0, + "data_type": "uint16", + "bits_per_sample": 15, + "spatial_resolution": 20, + "scale": 0.0001, + "offset": 0, + "unit": "m" + } ] }, "links": [ @@ -261,15 +483,5 @@ "metadata" ] } - }, - "datetime": null, - "bbox": [ - -7.882190080512502, - 37.13739173208318, - 27.911651652899923, - 58.21798141355221 - ], - "stac_extensions": [ - "https://raw.githubusercontent.com/crim-ca/dlm-extension/main/json-schema/schema.json" - ] + } } diff --git a/json-schema/schema.json b/json-schema/schema.json index bf56f44..ed79f02 100644 --- a/json-schema/schema.json +++ b/json-schema/schema.json @@ -575,6 +575,10 @@ } ] }, + "InputStatistics": { + "$comment": "MLM statistics for the specific input relevant for normalization for ML features.", + "$ref": "https://stac-extensions.github.io/raster/v1.1.0/schema.json#/definitions/bands/items/properties/statistics" + }, "ProcessingExpression": { "oneOf": [ { From 269bd734721ae2306fcbe177369f3cbf0813c241 Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Thu, 4 Apr 2024 18:19:06 -0400 Subject: [PATCH 084/112] update pydantic models with new json-schema fields --- json-schema/schema.json | 6 +- 
stac_model/base.py | 66 ++++++++++++++++++++ stac_model/examples.py | 110 +++++++++++++++++++--------------- stac_model/input.py | 65 ++++++++++++-------- stac_model/output.py | 129 ++++++++++++++++++++++++++++------------ stac_model/runtime.py | 18 +++--- stac_model/schema.py | 57 ++++++++---------- 7 files changed, 299 insertions(+), 152 deletions(-) create mode 100644 stac_model/base.py diff --git a/json-schema/schema.json b/json-schema/schema.json index ed79f02..7578e74 100644 --- a/json-schema/schema.json +++ b/json-schema/schema.json @@ -539,7 +539,11 @@ ] }, "NormalizeClip": { - + "type": "array", + "minItems": 1, + "items": { + "type": "number" + } }, "ResizeType": { "oneOf": [ diff --git a/stac_model/base.py b/stac_model/base.py new file mode 100644 index 0000000..4f4235d --- /dev/null +++ b/stac_model/base.py @@ -0,0 +1,66 @@ +from enum import Enum +from typing import Any, Literal, Union, TypeAlias + +from pydantic import BaseModel + + +DataType: TypeAlias = Literal[ + "uint8", + "uint16", + "uint32", + "uint64", + "int8", + "int16", + "int32", + "int64", + "float16", + "float32", + "float64", + "cint16", + "cint32", + "cfloat32", + "cfloat64", + "other" +] + + +class TaskEnum(str, Enum): + REGRESSION = "regression" + CLASSIFICATION = "classification" + SCENE_CLASSIFICATION = "scene-classification" + DETECTION = "detection" + OBJECT_DETECTION = "object-detection" + SEGMENTATION = "segmentation" + SEMANTIC_SEGMENTATION = "semantic-segmentation" + INSTANCE_SEGMENTATION = "instance-segmentation" + PANOPTIC_SEGMENTATION = "panoptic-segmentation" + SIMILARITY_SEARCH = "similarity-search" + GENERATIVE = "generative" + IMAGE_CAPTIONING = "image-captioning" + SUPER_RESOLUTION = "super-resolution" + + +ModelTaskNames: TypeAlias = Literal[ + "regression", + "classification", + "scene-classification", + "detection", + "object-detection", + "segmentation", + "semantic-segmentation", + "instance-segmentation", + "panoptic-segmentation", + "similarity-search", + 
"generative", + "image-captioning", + "super-resolution" +] + + +ModelTask = Union[ModelTaskNames, TaskEnum] + + +class ProcessingExpression(BaseModel): + # FIXME: should use 'pystac' reference, but 'processing' extension is not implemented yet! + format: str + expression: Any diff --git a/stac_model/examples.py b/stac_model/examples.py index 8e0ede4..9747086 100644 --- a/stac_model/examples.py +++ b/stac_model/examples.py @@ -1,15 +1,15 @@ import pystac import json import shapely +from stac_model.base import ProcessingExpression +from stac_model.input import ModelInput +from stac_model.output import ModelOutput, ModelResult from stac_model.schema import ( Asset, - ClassObject, InputArray, + MLMClassification, MLModelExtension, MLModelProperties, - ModelInput, - ModelOutput, - ResultArray, Runtime, Statistics, ) @@ -17,7 +17,14 @@ def eurosat_resnet() -> MLModelExtension[pystac.Item]: input_array = InputArray( - shape=[-1, 13, 64, 64], dim_order="bchw", data_type="float32" + shape=[-1, 13, 64, 64], + dim_order=[ + "batch", + "channel", + "height", + "width" + ], + data_type="float32", ) band_names = [ "B01", @@ -69,29 +76,34 @@ def eurosat_resnet() -> MLModelExtension[pystac.Item]: input = ModelInput( name="13 Band Sentinel-2 Batch", bands=band_names, - input_array=input_array, + input=input_array, norm_by_channel=True, - norm_type="z_score", - resize_type="none", + norm_type="z-score", + resize_type=None, statistics=stats, - pre_processing_function="torchgeo.datamodules.eurosat.EuroSATDataModule.collate_fn", # noqa: E501 - ) - runtime = Runtime( - framework="torch", - version="2.1.2+cu121", - asset=Asset(title = "Pytorch weights checkpoint", description="A Resnet-18 classification model trained on normalized Sentinel-2 imagery with Eurosat landcover labels with torchgeo", # noqa: E501 - type=".pth", roles=["weights"], href="https://huggingface.co/torchgeo/resnet18_sentinel2_all_moco/resolve/main/resnet18_sentinel2_all_moco-59bfdff9.pth" # noqa: E501 - ), - 
source_code=Asset( - href="https://github.com/microsoft/torchgeo/blob/61efd2e2c4df7ebe3bd03002ebbaeaa3cfe9885a/torchgeo/models/resnet.py#L207" # noqa: E501 - ), - accelerator="cuda", - accelerator_constrained=False, - hardware_summary="Unknown", - commit_hash="61efd2e2c4df7ebe3bd03002ebbaeaa3cfe9885a", + pre_processing_function=ProcessingExpression( + format="python", + expression="torchgeo.datamodules.eurosat.EuroSATDataModule.collate_fn" + ), # noqa: E501 ) - result_array = ResultArray( - shape=[-1, 10], dim_order=["batch", "class"], data_type="float32" + # runtime = Runtime( + # framework="torch", + # version="2.1.2+cu121", + # asset=Asset(title = "Pytorch weights checkpoint", description="A Resnet-18 classification model trained on normalized Sentinel-2 imagery with Eurosat landcover labels with torchgeo", # noqa: E501 + # type=".pth", roles=["weights"], href="https://huggingface.co/torchgeo/resnet18_sentinel2_all_moco/resolve/main/resnet18_sentinel2_all_moco-59bfdff9.pth" # noqa: E501 + # ), + # source_code=Asset( + # href="https://github.com/microsoft/torchgeo/blob/61efd2e2c4df7ebe3bd03002ebbaeaa3cfe9885a/torchgeo/models/resnet.py#L207" # noqa: E501 + # ), + # accelerator="cuda", + # accelerator_constrained=False, + # hardware_summary="Unknown", + # commit_hash="61efd2e2c4df7ebe3bd03002ebbaeaa3cfe9885a", + # ) + result_array = ModelResult( + shape=[-1, 10], + dim_order=["batch", "class"], + data_type="float32" ) class_map = { "Annual Crop": 0, @@ -106,30 +118,26 @@ def eurosat_resnet() -> MLModelExtension[pystac.Item]: "SeaLake": 9, } class_objects = [ - ClassObject(value=class_map[class_name], name=class_name) - for class_name in class_map + MLMClassification(value=class_value, name=class_name) + for class_name, class_value in class_map.items() ] output = ModelOutput( - task="classification", - classification_classes=class_objects, - output_shape=[-1, 10], - result_array=[result_array], + name="classification", + tasks={"classification"}, + 
classes=class_objects, + result=result_array, + post_processing_function=None, ) ml_model_meta = MLModelProperties( name="Resnet-18 Sentinel-2 ALL MOCO", - task="classification", + tasks={"classification"}, framework="pytorch", framework_version="2.1.2+cu121", file_size=43000000, memory_size=1, - summary=( - "Sourced from torchgeo python library," - "identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO" - ), pretrained_source="EuroSat Sentinel-2", total_parameters=11_700_000, input=[input], - runtime=[runtime], output=[output], ) # TODO, this can't be serialized but pystac.item calls for a datetime @@ -138,26 +146,30 @@ def eurosat_resnet() -> MLModelExtension[pystac.Item]: start_datetime = "1900-01-01" end_datetime = None bbox = [ - -7.882190080512502, - 37.13739173208318, - 27.911651652899923, - 58.21798141355221 - ] - geometry = json.dumps(shapely.geometry.Polygon.from_bounds(*bbox).__geo_interface__, indent=2) - name = ( - "_".join(ml_model_meta.name.split(" ")).lower() - + f"_{ml_model_meta.task}".lower() - ) + -7.882190080512502, + 37.13739173208318, + 27.911651652899923, + 58.21798141355221 + ] + geometry = shapely.geometry.Polygon.from_bounds(*bbox).__geo_interface__ + name = "_".join(ml_model_meta.name.split(" ")).lower() item = pystac.Item( id=name, geometry=geometry, bbox=bbox, datetime=None, - properties={"start_datetime": start_datetime, "end_datetime": end_datetime}, + properties={ + "start_datetime": start_datetime, + "end_datetime": end_datetime, + "description": ( + "Sourced from torchgeo python library," + "identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO" + ), + }, ) item.add_derived_from( "https://earth-search.aws.element84.com/v1/collections/sentinel-2-l2a" ) item_mlm = MLModelExtension.ext(item, add_if_missing=True) - item_mlm.apply(ml_model_meta.model_dump()) + item_mlm.apply(ml_model_meta.model_dump(by_alias=True)) return item_mlm diff --git a/stac_model/input.py b/stac_model/input.py index c453dbb..107fc5c 100644 --- a/stac_model/input.py +++ 
b/stac_model/input.py @@ -1,15 +1,14 @@ -from typing import Dict, List, Literal, Optional, Union +from typing import Any, List, Literal, Optional, Set, TypeAlias, Union -from pydantic import AnyUrl, BaseModel, Field +from pydantic import BaseModel, Field + +from stac_model.base import DataType, ProcessingExpression class InputArray(BaseModel): - shape: List[Union[int, float]] - dim_order: List[str] - data_type: str = Field( - ..., - pattern="^(uint8|uint16|uint32|uint64|int8|int16|int32|int64|float16|float32|float64|cint16|cint32|cfloat32|cfloat64|other)$", - ) + shape: List[Union[int, float]] = Field(..., min_items=1) + dim_order: List[str] = Field(..., min_items=1) + data_type: DataType class Statistics(BaseModel): @@ -24,29 +23,45 @@ class Statistics(BaseModel): class Band(BaseModel): name: str description: Optional[str] = None - nodata: float | int | str + nodata: Union[float, int, str] data_type: str unit: Optional[str] = None +NormalizeType: TypeAlias = Optional[Literal[ + "min-max", + "z-score", + "l1", + "l2", + "l2sqr", + "hamming", + "hamming2", + "type-mask", + "relative", + "inf" +]] + +ResizeType: TypeAlias = Optional[Literal[ + "crop", + "pad", + "interpolation-nearest", + "interpolation-linear", + "interpolation-cubic", + "interpolation-area", + "interpolation-lanczos4", + "interpolation-max", + "wrap-fill-outliers", + "wrap-inverse-map" +]] + + class ModelInput(BaseModel): name: str bands: List[str] - input_array: InputArray + input: InputArray norm_by_channel: bool = None - norm_type: Literal[ - "min_max", - "z_score", - "max_norm", - "mean_norm", - "unit_variance", - "norm_with_clip", - "none", - ] = None - resize_type: Literal["crop", "pad", "interpolate", "none"] = None - parameters: Optional[ - Dict[str, Union[int, str, bool, List[Union[int, str, bool]]]] - ] = None + norm_type: NormalizeType = None + norm_clip: Optional[List[Union[float, int]]] = None + resize_type: ResizeType = None statistics: Optional[Union[Statistics, List[Statistics]]] = 
None - norm_with_clip_values: Optional[List[Union[float, int]]] = None - pre_processing_function: Optional[str | AnyUrl] = None + pre_processing_function: Optional[ProcessingExpression] = None diff --git a/stac_model/output.py b/stac_model/output.py index 0b2e919..f08cbb2 100644 --- a/stac_model/output.py +++ b/stac_model/output.py @@ -1,43 +1,96 @@ -from enum import Enum -from typing import List, Optional, Union - -from pydantic import BaseModel, Field - - -class TaskEnum(str, Enum): - regression = "regression" - classification = "classification" - object_detection = "object detection" - semantic_segmentation = "semantic segmentation" - instance_segmentation = "instance segmentation" - panoptic_segmentation = "panoptic segmentation" - multi_modal = "multi-modal" - similarity_search = "similarity search" - image_captioning = "image captioning" - generative = "generative" - super_resolution = "super resolution" - - -class ResultArray(BaseModel): - shape: List[Union[int, float]] - dim_order: List[str] - data_type: str = Field( - ..., - pattern="^(uint8|uint16|uint32|uint64|int8|int16|int32|int64|float16|float32|float64)$", - ) +from typing import Annotated, Any, Dict, List, Optional, Set, TypeAlias, Union +from typing_extensions import NotRequired, TypedDict +from pystac.extensions.classification import Classification +from pydantic import AliasChoices, BaseModel, ConfigDict, Field, PlainSerializer, model_serializer -class ClassObject(BaseModel): - value: int - name: str - description: Optional[str] = None - title: Optional[str] = None - color_hint: Optional[str] = None - nodata: Optional[bool] = False +from stac_model.base import DataType, ModelTask, ProcessingExpression + + +class ModelResult(BaseModel): + shape: List[Union[int, float]] = Field(..., min_items=1) + dim_order: List[str] = Field(..., min_items=1) + data_type: DataType + + +# MLMClassification: TypeAlias = Annotated[ +# Classification, +# PlainSerializer( +# lambda x: x.to_dict(), +# when_used="json", 
+# return_type=TypedDict( +# "Classification", +# { +# "value": int, +# "name": str, +# "description": NotRequired[str], +# "color_hint": NotRequired[str], +# } +# ) +# ) +# ] + + +class MLMClassification(BaseModel, Classification): + @model_serializer() + def model_dump(self, *_, **__) -> Dict[str, Any]: + return self.to_dict() + + def __init__( + self, + value: int, + description: Optional[str] = None, + name: Optional[str] = None, + color_hint: Optional[str] = None + ) -> None: + Classification.__init__(self, {}) + if not name and not description: + raise ValueError("Class name or description is required!") + self.apply( + value=value, + name=name or description, + description=description or name, + color_hint=color_hint, + ) + + def __hash__(self) -> int: + return sum(map(hash, self.to_dict().items())) + + def __setattr__(self, key: str, value: Any) -> None: + if key == "properties": + Classification.__setattr__(self, key, value) + else: + BaseModel.__setattr__(self, key, value) + + model_config = ConfigDict(arbitrary_types_allowed=True) + +# class ClassObject(BaseModel): +# value: int +# name: str +# description: Optional[str] = None +# title: Optional[str] = None +# color_hint: Optional[str] = None +# nodata: Optional[bool] = False class ModelOutput(BaseModel): - task: TaskEnum - result_array: Optional[List[ResultArray]] = None - classification_classes: Optional[List[ClassObject]] = None - post_processing_function: Optional[str] = None + name: str + tasks: Set[ModelTask] + result: ModelResult + + # NOTE: + # Although it is preferable to have 'Set' to avoid duplicate, + # it is more important to keep the order in this case, + # which we would lose with 'Set'. + # We also get some unhashable errors with 'Set', although 'MLMClassification' implements '__hash__'. 
+ classes: List[MLMClassification] = Field( + alias="classification:classes", + validation_alias=AliasChoices("classification:classes", "classification_classes"), + exclude_unset=True, + exclude_defaults=True + ) + post_processing_function: Optional[ProcessingExpression] = None + + model_config = ConfigDict( + populate_by_name=True + ) diff --git a/stac_model/runtime.py b/stac_model/runtime.py index b1a564a..1c0491f 100644 --- a/stac_model/runtime.py +++ b/stac_model/runtime.py @@ -1,7 +1,7 @@ from enum import Enum from typing import List, Optional -from pydantic import AnyUrl, BaseModel, ConfigDict, FilePath +from pydantic import AnyUrl, BaseModel, ConfigDict, FilePath, Field class Asset(BaseModel): @@ -41,11 +41,13 @@ def __str__(self): class Runtime(BaseModel): - asset: Asset - source_code: Asset - accelerator: AcceleratorEnum - accelerator_constrained: bool - hardware_summary: str - container: Optional[Container] = None - commit_hash: Optional[str] = None + framework: str + framework_version: str + file_size: int = Field(alias="file:size") + memory_size: int batch_size_suggestion: Optional[int] = None + + accelerator: Optional[AcceleratorEnum] = Field(exclude_unset=True, default=None) + accelerator_constrained: bool = Field(exclude_unset=True, default=False) + accelerator_summary: str = Field(exclude_unset=True, exclude_defaults=True, default="") + accelerator_count: int = Field(minimum=1, exclude_unset=True, exclude_defaults=True, default=-1) diff --git a/stac_model/schema.py b/stac_model/schema.py index 4f41603..13f13b9 100644 --- a/stac_model/schema.py +++ b/stac_model/schema.py @@ -7,6 +7,7 @@ List, Literal, Optional, + Set, TypeVar, Union, cast, @@ -14,7 +15,7 @@ ) import pystac -from pydantic import BaseModel, ConfigDict +from pydantic import BaseModel, ConfigDict, Field from pydantic.fields import FieldInfo from pystac.extensions import item_assets from pystac.extensions.base import ( @@ -24,9 +25,10 @@ SummariesExtension, ) -from .input import Band, 
InputArray, ModelInput, Statistics -from .output import ClassObject, ModelOutput, ResultArray, TaskEnum -from .runtime import Asset, Container, Runtime +from stac_model.base import DataType, ModelTask +from stac_model.input import Band, InputArray, ModelInput, Statistics +from stac_model.output import MLMClassification, ModelOutput +from stac_model.runtime import Asset, Container, Runtime T = TypeVar( "T", pystac.Collection, pystac.Item, pystac.Asset, item_assets.AssetDefinition @@ -41,25 +43,20 @@ def mlm_prefix_adder(field_name: str) -> str: return "mlm:" + field_name -class MLModelProperties(BaseModel): +class MLModelProperties(Runtime): name: str - task: TaskEnum - framework: str - framework_version: str - file_size: int - memory_size: int + tasks: Set[ModelTask] input: List[ModelInput] output: List[ModelOutput] - runtime: List[Runtime] + total_parameters: int - pretrained_source: str - summary: str - parameters: Optional[ - Dict[str, Union[int, str, bool, List[Union[int, str, bool]]]] - ] = None # noqa: E501 + pretrained: bool = Field(exclude_unset=True, default=True) + pretrained_source: Optional[str] = Field(exclude_unset=True) model_config = ConfigDict( - alias_generator=mlm_prefix_adder, populate_by_name=True, extra="ignore" + alias_generator=mlm_prefix_adder, + populate_by_name=True, + extra="ignore" ) @@ -221,17 +218,15 @@ def __init__(self, collection: pystac.Collection): self.collection = collection -__all__ = [ - "MLModelExtension", - "ModelInput", - "InputArray", - "Band", - "Statistics", - "ModelOutput", - "ClassObject", - "Asset", - "ResultArray", - "Runtime", - "Container", - "Asset", -] +# __all__ = [ +# "MLModelExtension", +# "ModelInput", +# "InputArray", +# "Band", +# "Statistics", +# "ModelOutput", +# "Asset", +# "Runtime", +# "Container", +# "Asset", +# ] From 03e7e06ccc8fbbed284be5192351d2226dfd2fe6 Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Thu, 4 Apr 2024 19:47:42 -0400 Subject: [PATCH 085/112] add details & example 
with 'eo:bands' for special JSON schema consideration (relates to https://github.com/stac-extensions/eo/issues/12)" --- README.md | 15 + examples/example.json | 109 +------ examples/example_eo_bands.json | 506 +++++++++++++++++++++++++++++++++ json-schema/schema.json | 25 +- tests/conftest.py | 8 +- tests/test_schema.py | 22 +- 6 files changed, 577 insertions(+), 108 deletions(-) create mode 100644 examples/example_eo_bands.json diff --git a/README.md b/README.md index 1cbff67..cf595df 100644 --- a/README.md +++ b/README.md @@ -226,6 +226,21 @@ representing bands information, including notably the `nodata` value, the `data_type` (see also [Data Type Enum](#data-type-enum)), and [Common Band Names][stac-band-names]. +> [!NOTE] +> Due to how the schema for [`eo:bands`][stac-eo-band] is defined, it is not sufficient to *only* provide +> the `eo:bands` property at the STAC Item level. The schema validation of the EO extension explicitly looks +> for a corresponding set of bands under an Asset, and if none is found, it disallows `eo:bands` in the Item properties. +> Therefore, `eo:bands` should either be specified *only* under the Asset containing the `mlm:model` role +> (see [Model Asset](#model-asset)), or define them *both* under the Asset and Item properties. If the second +> approach is selected, it is recommended that the `eo:bands` under the Asset contains only the `name` or the +> `common_name` property, such that all other details about the bands are defined at the Item level. +>

+> For more details, refer to [stac-extensions/eo#12](https://github.com/stac-extensions/eo/issues/12). +>
+> For an example, please refer to [examples/example_eo_bands.json](examples/example_eo_bands.json). +> Notably in this example, the `assets.weights.eo:bands` property provides the `name` to fulfill the Asset requirement, +> while all additional band details are provided in `properties.eo:bands`. + Only bands used as input to the model should be included in the MLM `bands` field. To avoid duplicating the information, MLM only uses the `name` of whichever "Band Object" is defined in the STAC Item. diff --git a/examples/example.json b/examples/example.json index f37231c..cb3a41a 100644 --- a/examples/example.json +++ b/examples/example.json @@ -2,8 +2,9 @@ "stac_version": "1.0.0", "stac_extensions": [ "https://stac-extensions.github.io/mlm/v1.0.0/schema.json", - "https://stac-extensions.github.io/eo/v1.1.0/schema.json", - "https://stac-extensions.github.io/raster/v1.1.0/schema.json" + "https://stac-extensions.github.io/raster/v1.1.0/schema.json", + "https://stac-extensions.github.io/file/v1.0.0/schema.json", + "https://stac-extensions.github.io/ml-aoi/v0.2.0/schema.json" ], "type": "Feature", "id": "resnet-18_sentinel-2_all_moco_classification", @@ -41,6 +42,7 @@ 58.21798141355221 ], "properties": { + "description": "Sourced from torchgeo python library, identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO", "datetime": null, "start_datetime": "1900-01-01T00:00:00Z", "end_datetime": "9999-12-31T23:59:59Z", @@ -50,11 +52,10 @@ ], "mlm:framework": "pytorch", "mlm:framework_version": "2.1.2+cu121", - "mlm:file_size": 43000000, + "file:size": 43000000, "mlm:memory_size": 1, "mlm:total_parameters": 11700000, "mlm:pretrained_source": "EuroSat Sentinel-2", - "mlm:summary": "Sourced from torchgeo python library, identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO", "mlm:accelerator": "cuda", "mlm:accelerator_constrained": false, "mlm:accelerator_summary": "Unknown", @@ -243,99 +244,6 @@ "post_processing_function": null } ], - "eo:bands": [ - { - "name": "coastal", - 
"common_name": "coastal", - "description": "Coastal aerosol (band 1)", - "center_wavelength": 0.443, - "full_width_half_max": 0.027 - }, - { - "name": "blue", - "common_name": "blue", - "description": "Blue (band 2)", - "center_wavelength": 0.49, - "full_width_half_max": 0.098 - }, - { - "name": "green", - "common_name": "green", - "description": "Green (band 3)", - "center_wavelength": 0.56, - "full_width_half_max": 0.045 - }, - { - "name": "red", - "common_name": "red", - "description": "Red (band 4)", - "center_wavelength": 0.665, - "full_width_half_max": 0.038 - }, - { - "name": "rededge1", - "common_name": "rededge", - "description": "Red edge 1 (band 5)", - "center_wavelength": 0.704, - "full_width_half_max": 0.019 - }, - { - "name": "rededge2", - "common_name": "rededge", - "description": "Red edge 2 (band 6)", - "center_wavelength": 0.74, - "full_width_half_max": 0.018 - }, - { - "name": "rededge3", - "common_name": "rededge", - "description": "Red edge 3 (band 7)", - "center_wavelength": 0.783, - "full_width_half_max": 0.028 - }, - { - "name": "nir", - "common_name": "nir", - "description": "NIR 1 (band 8)", - "center_wavelength": 0.842, - "full_width_half_max": 0.145 - }, - { - "name": "nir08", - "common_name": "nir08", - "description": "NIR 2 (band 8A)", - "center_wavelength": 0.865, - "full_width_half_max": 0.033 - }, - { - "name": "nir09", - "common_name": "nir09", - "description": "NIR 3 (band 9)", - "center_wavelength": 0.945, - "full_width_half_max": 0.026 - }, - { - "name": "cirrus", - "common_name": "cirrus", - "description": "SWIR - Cirrus (band 10)", - "center_wavelength": 1.375, - "full_width_half_max": 0.026 - }, - { - "name": "swir16", - "common_name": "swir16", - "description": "SWIR 1 (band 11)", - "center_wavelength": 1.61, - "full_width_half_max": 0.143 - }, - { - "name": "swir22", - "common_name": "swir22", - "description": "SWIR 2 (band 12)", - "center_wavelength": 2.19, - "full_width_half_max": 0.242 - } - ], "raster:bands": [ { 
"name": "coastal", @@ -458,7 +366,8 @@ { "rel": "derived_from", "href": "https://earth-search.aws.element84.com/v1/collections/sentinel-2-l2a", - "type": "application/json" + "type": "application/json", + "ml-aoi:split": "train" } ], "assets": { @@ -466,7 +375,7 @@ "href": "https://huggingface.co/torchgeo/resnet18_sentinel2_all_moco/resolve/main/resnet18_sentinel2_all_moco-59bfdff9.pth", "title": "Pytorch weights checkpoint", "description": "A Resnet-18 classification model trained on normalized Sentinel-2 imagery with Eurosat landcover labels with torchgeo", - "type": ".pth", + "type": "application/octet-stream; application=pytorch", "roles": [ "mlm:model", "mlm:weights" @@ -476,7 +385,7 @@ "href": "https://github.com/microsoft/torchgeo/blob/61efd2e2c4df7ebe3bd03002ebbaeaa3cfe9885a/torchgeo/models/resnet.py#L207", "title": null, "description": null, - "type": null, + "type": "text/x-python", "roles": [ "mlm:model", "code", diff --git a/examples/example_eo_bands.json b/examples/example_eo_bands.json new file mode 100644 index 0000000..adb29d2 --- /dev/null +++ b/examples/example_eo_bands.json @@ -0,0 +1,506 @@ +{ + "stac_version": "1.0.0", + "stac_extensions": [ + "https://stac-extensions.github.io/mlm/v1.0.0/schema.json", + "https://stac-extensions.github.io/eo/v1.1.0/schema.json", + "https://stac-extensions.github.io/raster/v1.1.0/schema.json", + "https://stac-extensions.github.io/file/v1.0.0/schema.json", + "https://stac-extensions.github.io/ml-aoi/v0.2.0/schema.json" + ], + "type": "Feature", + "id": "resnet-18_sentinel-2_all_moco_classification", + "geometry": { + "type": "Polygon", + "coordinates": [ + [ + [ + -7.882190080512502, + 37.13739173208318 + ], + [ + -7.882190080512502, + 58.21798141355221 + ], + [ + 27.911651652899925, + 58.21798141355221 + ], + [ + 27.911651652899925, + 37.13739173208318 + ], + [ + -7.882190080512502, + 37.13739173208318 + ] + ] + ] + }, + "bbox": [ + -7.882190080512502, + 37.13739173208318, + 27.911651652899923, + 
58.21798141355221 + ], + "properties": { + "description": "Sourced from torchgeo python library, identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO", + "datetime": null, + "start_datetime": "1900-01-01T00:00:00Z", + "end_datetime": "9999-12-31T23:59:59Z", + "mlm:name": "Resnet-18 Sentinel-2 ALL MOCO", + "mlm:tasks": [ + "classification" + ], + "mlm:framework": "pytorch", + "mlm:framework_version": "2.1.2+cu121", + "file:size": 43000000, + "mlm:memory_size": 1, + "mlm:total_parameters": 11700000, + "mlm:pretrained_source": "EuroSat Sentinel-2", + "mlm:accelerator": "cuda", + "mlm:accelerator_constrained": false, + "mlm:accelerator_summary": "Unknown", + "mlm:batch_size_suggestion": null, + "mlm:input": [ + { + "name": "13 Band Sentinel-2 Batch", + "bands": [ + "B01", + "B02", + "B03", + "B04", + "B05", + "B06", + "B07", + "B08", + "B8A", + "B09", + "B10", + "B11", + "B12" + ], + "input": { + "shape": [ + -1, + 13, + 64, + 64 + ], + "dim_order": [ + "batch", + "channel", + "height", + "width" + ], + "data_type": "float32" + }, + "norm_by_channel": true, + "norm_type": "z-score", + "resize_type": null, + "parameters": null, + "statistics": { + "minimum": null, + "maximum": null, + "mean": [ + 1354.40546513, + 1118.24399958, + 1042.92983953, + 947.62620298, + 1199.47283961, + 1999.79090914, + 2369.22292565, + 2296.82608323, + 732.08340178, + 12.11327804, + 1819.01027855, + 1118.92391149, + 2594.14080798 + ], + "stddev": [ + 245.71762908, + 333.00778264, + 395.09249139, + 593.75055589, + 566.4170017, + 861.18399006, + 1086.63139075, + 1117.98170791, + 404.91978886, + 4.77584468, + 1002.58768311, + 761.30323499, + 1231.58581042 + ], + "count": null, + "valid_percent": null + }, + "norm_with_clip_values": null, + "pre_processing_function": { + "format": "python", + "expression": "torchgeo.datamodules.eurosat.EuroSATDataModule.collate_fn" + } + } + ], + "mlm:output": [ + { + "name": "classification", + "tasks": [ + "classification" + ], + "result": [ + { + "shape": [ + -1, 
+ 10 + ], + "dim_order": [ + "batch", + "class" + ], + "data_type": "float32" + } + ], + "classification_classes": [ + { + "value": 0, + "name": "Annual Crop", + "description": null, + "title": null, + "color_hint": null, + "nodata": false + }, + { + "value": 1, + "name": "Forest", + "description": null, + "title": null, + "color_hint": null, + "nodata": false + }, + { + "value": 2, + "name": "Herbaceous Vegetation", + "description": null, + "title": null, + "color_hint": null, + "nodata": false + }, + { + "value": 3, + "name": "Highway", + "description": null, + "title": null, + "color_hint": null, + "nodata": false + }, + { + "value": 4, + "name": "Industrial Buildings", + "description": null, + "title": null, + "color_hint": null, + "nodata": false + }, + { + "value": 5, + "name": "Pasture", + "description": null, + "title": null, + "color_hint": null, + "nodata": false + }, + { + "value": 6, + "name": "Permanent Crop", + "description": null, + "title": null, + "color_hint": null, + "nodata": false + }, + { + "value": 7, + "name": "Residential Buildings", + "description": null, + "title": null, + "color_hint": null, + "nodata": false + }, + { + "value": 8, + "name": "River", + "description": null, + "title": null, + "color_hint": null, + "nodata": false + }, + { + "value": 9, + "name": "SeaLake", + "description": null, + "title": null, + "color_hint": null, + "nodata": false + } + ], + "post_processing_function": null + } + ], + "eo:bands": [ + { + "name": "coastal", + "common_name": "coastal", + "description": "Coastal aerosol (band 1)", + "center_wavelength": 0.443, + "full_width_half_max": 0.027 + }, + { + "name": "blue", + "common_name": "blue", + "description": "Blue (band 2)", + "center_wavelength": 0.49, + "full_width_half_max": 0.098 + }, + { + "name": "green", + "common_name": "green", + "description": "Green (band 3)", + "center_wavelength": 0.56, + "full_width_half_max": 0.045 + }, + { + "name": "red", + "common_name": "red", + "description": "Red 
(band 4)", + "center_wavelength": 0.665, + "full_width_half_max": 0.038 + }, + { + "name": "rededge1", + "common_name": "rededge", + "description": "Red edge 1 (band 5)", + "center_wavelength": 0.704, + "full_width_half_max": 0.019 + }, + { + "name": "rededge2", + "common_name": "rededge", + "description": "Red edge 2 (band 6)", + "center_wavelength": 0.74, + "full_width_half_max": 0.018 + }, + { + "name": "rededge3", + "common_name": "rededge", + "description": "Red edge 3 (band 7)", + "center_wavelength": 0.783, + "full_width_half_max": 0.028 + }, + { + "name": "nir", + "common_name": "nir", + "description": "NIR 1 (band 8)", + "center_wavelength": 0.842, + "full_width_half_max": 0.145 + }, + { + "name": "nir08", + "common_name": "nir08", + "description": "NIR 2 (band 8A)", + "center_wavelength": 0.865, + "full_width_half_max": 0.033 + }, + { + "name": "nir09", + "common_name": "nir09", + "description": "NIR 3 (band 9)", + "center_wavelength": 0.945, + "full_width_half_max": 0.026 + }, + { + "name": "cirrus", + "common_name": "cirrus", + "description": "SWIR - Cirrus (band 10)", + "center_wavelength": 1.375, + "full_width_half_max": 0.026 + }, + { + "name": "swir16", + "common_name": "swir16", + "description": "SWIR 1 (band 11)", + "center_wavelength": 1.61, + "full_width_half_max": 0.143 + }, + { + "name": "swir22", + "common_name": "swir22", + "description": "SWIR 2 (band 12)", + "center_wavelength": 2.19, + "full_width_half_max": 0.242 + } + ], + "raster:bands": [ + { + "name": "coastal", + "common_name": "coastal", + "description": "Coastal aerosol (band 1)", + "center_wavelength": 0.443, + "full_width_half_max": 0.027 + }, + { + "nodata": 0, + "data_type": "uint16", + "bits_per_sample": 15, + "spatial_resolution": 10, + "scale": 0.0001, + "offset": 0, + "unit": "m" + }, + { + "nodata": 0, + "data_type": "uint16", + "bits_per_sample": 15, + "spatial_resolution": 10, + "scale": 0.0001, + "offset": 0, + "unit": "m" + }, + { + "nodata": 0, + "data_type": 
"uint16", + "bits_per_sample": 15, + "spatial_resolution": 10, + "scale": 0.0001, + "offset": 0, + "unit": "m" + }, + { + "nodata": 0, + "data_type": "uint16", + "bits_per_sample": 15, + "spatial_resolution": 20, + "scale": 0.0001, + "offset": 0, + "unit": "m" + }, + { + "nodata": 0, + "data_type": "uint16", + "bits_per_sample": 15, + "spatial_resolution": 20, + "scale": 0.0001, + "offset": 0, + "unit": "m" + }, + { + "nodata": 0, + "data_type": "uint16", + "bits_per_sample": 15, + "spatial_resolution": 20, + "scale": 0.0001, + "offset": 0, + "unit": "m" + }, + { + "nodata": 0, + "data_type": "uint16", + "bits_per_sample": 15, + "spatial_resolution": 10, + "scale": 0.0001, + "offset": 0, + "unit": "m" + }, + { + "nodata": 0, + "data_type": "uint16", + "bits_per_sample": 15, + "spatial_resolution": 20, + "scale": 0.0001, + "offset": 0, + "unit": "m" + }, + { + "nodata": 0, + "data_type": "uint16", + "bits_per_sample": 15, + "spatial_resolution": 60, + "scale": 0.0001, + "offset": 0, + "unit": "m" + }, + { + "nodata": 0, + "data_type": "uint16", + "bits_per_sample": 15, + "spatial_resolution": 60, + "scale": 0.0001, + "offset": 0, + "unit": "m" + }, + { + "nodata": 0, + "data_type": "uint16", + "bits_per_sample": 15, + "spatial_resolution": 20, + "scale": 0.0001, + "offset": 0, + "unit": "m" + }, + { + "nodata": 0, + "data_type": "uint16", + "bits_per_sample": 15, + "spatial_resolution": 20, + "scale": 0.0001, + "offset": 0, + "unit": "m" + } + ] + }, + "links": [ + { + "rel": "derived_from", + "href": "https://earth-search.aws.element84.com/v1/collections/sentinel-2-l2a", + "type": "application/json", + "ml-aoi:split": "train" + } + ], + "assets": { + "weights": { + "href": "https://huggingface.co/torchgeo/resnet18_sentinel2_all_moco/resolve/main/resnet18_sentinel2_all_moco-59bfdff9.pth", + "title": "Pytorch weights checkpoint", + "description": "A Resnet-18 classification model trained on normalized Sentinel-2 imagery with Eurosat landcover labels with torchgeo", + 
"type": "application/octet-stream; application=pytorch", + "roles": [ + "mlm:model", + "mlm:weights" + ], + "$comment": "Following 'eo:bands' is required to fulfil schema validation of 'eo' extension.", + "eo:bands": [ + {"name": "coastal"}, + {"name": "blue"}, + {"name": "green"}, + {"name": "red"}, + {"name": "rededge1"}, + {"name": "rededge2"}, + {"name": "rededge3"}, + {"name": "nir"}, + {"name": "nir08"}, + {"name": "nir09"}, + {"name": "cirrus"}, + {"name": "swir16"}, + {"name": "swir22"} + ] + }, + "source_code": { + "href": "https://github.com/microsoft/torchgeo/blob/61efd2e2c4df7ebe3bd03002ebbaeaa3cfe9885a/torchgeo/models/resnet.py#L207", + "title": null, + "description": null, + "type": "text/x-python", + "roles": [ + "mlm:model", + "code", + "metadata" + ] + } + } +} diff --git a/json-schema/schema.json b/json-schema/schema.json index 7578e74..af3af2a 100644 --- a/json-schema/schema.json +++ b/json-schema/schema.json @@ -38,7 +38,15 @@ "assets": { "type": "object", "additionalProperties": { - "$ref": "#/$defs/fields" + "allOf": [ + { + "$ref": "#/$defs/fields" + }, + { + "$comment": "At least one Asset must provide the model definition.", + "$ref": "#/$defs/AssetModelRole" + } + ] } } } @@ -596,6 +604,21 @@ "DataType": { "$ref": "https://stac-extensions.github.io/raster/v1.1.0/schema.json#/definitions/bands/items/properties/data_type" }, + "AssetModelRole": { + "required": ["assets"], + "properties": { + "assets": { + "additionalProperties": { + "required": ["roles"], + "properties": { + "roles": { + "contains": "mlm:model" + } + } + } + } + } + }, "ModelBands": { "allOf": [ { diff --git a/tests/conftest.py b/tests/conftest.py index b33cce0..1c51fed 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -19,7 +19,7 @@ def mlm_schema() -> Dict[str, Any]: return json.load(schema_file) -@pytest.fixture(scope="session", autouse=True) +@pytest.fixture(scope="session") def mlm_validator( request: pytest.FixtureRequest, mlm_schema: Dict[str, Any], @@ -39,9 
+39,9 @@ def mlm_validator( return validator -@pytest.fixture(scope="session", autouse=True) -def mlm_example() -> Dict[str, Any]: - with open(os.path.join(EXAMPLES_DIR, "example.json")) as example_file: +@pytest.fixture +def mlm_example(request) -> Dict[str, Any]: + with open(os.path.join(EXAMPLES_DIR, request.param)) as example_file: return json.load(example_file) diff --git a/tests/test_schema.py b/tests/test_schema.py index a25f69d..61f717a 100644 --- a/tests/test_schema.py +++ b/tests/test_schema.py @@ -1,10 +1,26 @@ +from typing import Any, Dict import pystac +import pytest +from stac_model.schema import SCHEMA_URI -def test_mlm_schema(mlm_validator, mlm_example): + +@pytest.mark.parametrize( + "mlm_example", # value passed to 'mlm_example' fixture + [ + "example.json", + "example_eo_bands.json", + ], + indirect=True, +) +def test_mlm_schema( + mlm_validator: pystac.validation.STACValidator, + mlm_example, +) -> None: mlm_item = pystac.Item.from_dict(mlm_example) - invalid = pystac.validation.validate(mlm_item, validator=mlm_validator) - assert not invalid + validated = pystac.validation.validate(mlm_item, validator=mlm_validator) + assert len(validated) >= len(mlm_item.stac_extensions) # extra STAC core schemas + assert SCHEMA_URI in validated def test_model_metadata_to_dict(eurosat_resnet): From 2d6c70b63738e62908eef432ad046f186835ed58 Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Thu, 4 Apr 2024 20:52:00 -0400 Subject: [PATCH 086/112] update examples working against JSON schema (except check for cross-bands [AnyBandsRef]) --- README.md | 2 +- examples/item_basic.json | 116 +++++++++ ...ample_eo_bands.json => item_eo_bands.json} | 3 +- examples/item_multi_io.json | 242 ++++++++++++++++++ .../{example.json => item_raster_bands.json} | 68 +---- json-schema/schema.json | 57 ++++- stac_model/input.py | 2 +- stac_model/runtime.py | 10 +- tests/test_schema.py | 6 +- 9 files changed, 428 insertions(+), 78 deletions(-) create mode 100644 
examples/item_basic.json rename examples/{example_eo_bands.json => item_eo_bands.json} (99%) create mode 100644 examples/item_multi_io.json rename examples/{example.json => item_raster_bands.json} (86%) diff --git a/README.md b/README.md index cf595df..78e4553 100644 --- a/README.md +++ b/README.md @@ -68,7 +68,7 @@ The fields in the table below can be used in these parts of STAC documents: | mlm:name | string | **REQUIRED** A unique name for the model. This can include, but must be distinct, from simply naming the model architecture. If there is a publication or other published work related to the model, use the official name of the model. | | mlm:architecture | [Model Architecture](#model-architecture) string | **REQUIRED** A generic and well established architecture name of the model. | | mlm:tasks | \[[Task Enum](#task-enum)] | **REQUIRED** Specifies the Machine Learning tasks for which the model can be used for. If multi-tasks outputs are provided by distinct model heads, specify all available tasks under the main properties and specify respective tasks in each [Model Output Object](#model-output-object). | -| mlm:framework | string | **REQUIRED** Framework used to train the model (ex: PyTorch, TensorFlow). | +| mlm:framework | string | Framework used to train the model (ex: PyTorch, TensorFlow). | | mlm:framework_version | string | The `framework` library version. Some models require a specific version of the machine learning `framework` to run. | | mlm:memory_size | integer | The in-memory size of the model on the accelerator during inference (bytes). | | mlm:total_parameters | integer | Total number of model parameters, including trainable and non-trainable parameters. 
| diff --git a/examples/item_basic.json b/examples/item_basic.json new file mode 100644 index 0000000..0778163 --- /dev/null +++ b/examples/item_basic.json @@ -0,0 +1,116 @@ +{ + "stac_version": "1.0.0", + "stac_extensions": [ + "https://stac-extensions.github.io/mlm/v1.0.0/schema.json" + ], + "type": "Feature", + "id": "example-model", + "geometry": { + "type": "Polygon", + "coordinates": [ + [ + [ + -7.882190080512502, + 37.13739173208318 + ], + [ + -7.882190080512502, + 58.21798141355221 + ], + [ + 27.911651652899925, + 58.21798141355221 + ], + [ + 27.911651652899925, + 37.13739173208318 + ], + [ + -7.882190080512502, + 37.13739173208318 + ] + ] + ] + }, + "bbox": [ + -7.882190080512502, + 37.13739173208318, + 27.911651652899923, + 58.21798141355221 + ], + "properties": { + "datetime": null, + "start_datetime": "1900-01-01T00:00:00Z", + "end_datetime": "9999-12-31T23:59:59Z", + "mlm:name": "example-model", + "mlm:tasks": [ + "classification" + ], + "mlm:architecture": "ResNet", + "mlm:input": [ + { + "name": "Model with RGB input that does not refer to any band.", + "bands": [], + "input": { + "shape": [ + -1, + 3, + 64, + 64 + ], + "dim_order": [ + "batch", + "channel", + "height", + "width" + ], + "data_type": "float32" + } + } + ], + "mlm:output": [ + { + "name": "classification", + "tasks": [ + "classification" + ], + "result": { + "shape": [ + -1, + 1 + ], + "dim_order": [ + "batch", + "class" + ], + "data_type": "uint8" + }, + "classification_classes": [ + { + "value": 0, + "name": "BACKGROUND", + "description": "Background non-city.", + "color_hint": [0, 0, 0] + }, + { + "value": 1, + "name": "CITY", + "description": "A city is detected.", + "color_hint": [0, 0, 255] + } + ] + } + ] + }, + "assets": { + "model": { + "href": "https://huggingface.co/example/model-card", + "title": "Pytorch weights checkpoint", + "description": "Example model.", + "type": "text/html", + "roles": [ + "mlm:model" + ] + } + } +} diff --git a/examples/example_eo_bands.json 
b/examples/item_eo_bands.json similarity index 99% rename from examples/example_eo_bands.json rename to examples/item_eo_bands.json index adb29d2..60a5868 100644 --- a/examples/example_eo_bands.json +++ b/examples/item_eo_bands.json @@ -51,6 +51,7 @@ "mlm:tasks": [ "classification" ], + "mlm:architecture": "ResNet", "mlm:framework": "pytorch", "mlm:framework_version": "2.1.2+cu121", "file:size": 43000000, @@ -60,7 +61,7 @@ "mlm:accelerator": "cuda", "mlm:accelerator_constrained": false, "mlm:accelerator_summary": "Unknown", - "mlm:batch_size_suggestion": null, + "mlm:batch_size_suggestion": 256, "mlm:input": [ { "name": "13 Band Sentinel-2 Batch", diff --git a/examples/item_multi_io.json b/examples/item_multi_io.json new file mode 100644 index 0000000..cd1b465 --- /dev/null +++ b/examples/item_multi_io.json @@ -0,0 +1,242 @@ +{ + "stac_version": "1.0.0", + "stac_extensions": [ + "https://stac-extensions.github.io/mlm/v1.0.0/schema.json", + "https://stac-extensions.github.io/raster/v1.1.0/schema.json", + "https://stac-extensions.github.io/file/v1.0.0/schema.json", + "https://stac-extensions.github.io/ml-aoi/v0.2.0/schema.json" + ], + "type": "Feature", + "id": "resnet-18_sentinel-2_all_moco_classification", + "geometry": { + "type": "Polygon", + "coordinates": [ + [ + [ + -7.882190080512502, + 37.13739173208318 + ], + [ + -7.882190080512502, + 58.21798141355221 + ], + [ + 27.911651652899925, + 58.21798141355221 + ], + [ + 27.911651652899925, + 37.13739173208318 + ], + [ + -7.882190080512502, + 37.13739173208318 + ] + ] + ] + }, + "bbox": [ + -7.882190080512502, + 37.13739173208318, + 27.911651652899923, + 58.21798141355221 + ], + "properties": { + "description": "Sourced from torchgeo python library, identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO", + "datetime": null, + "start_datetime": "1900-01-01T00:00:00Z", + "end_datetime": "9999-12-31T23:59:59Z", + "mlm:name": "Resnet-18 Sentinel-2 ALL MOCO", + "mlm:tasks": [ + "classification" + ], + "mlm:architecture": 
"ResNet", + "mlm:framework": "pytorch", + "mlm:framework_version": "2.1.2+cu121", + "file:size": 43000000, + "mlm:memory_size": 1, + "mlm:total_parameters": 11700000, + "mlm:pretrained_source": "EuroSat Sentinel-2", + "mlm:accelerator": "cuda", + "mlm:accelerator_constrained": false, + "mlm:accelerator_summary": "Unknown", + "mlm:batch_size_suggestion": 256, + "mlm:input": [ + { + "name": "RGB", + "bands": [ + "B04", + "B03", + "B02" + ], + "input": { + "shape": [ + -1, + 3, + 64, + 64 + ], + "dim_order": [ + "batch", + "channel", + "height", + "width" + ], + "data_type": "uint16" + }, + "norm_by_channel": false, + "norm_type": null, + "resize_type": null + }, + { + "name": "NDVI", + "bands": [ + "B04", + "B08" + ], + "pre_processing_function": { + "format": "gdal-calc", + "expression": "(A - B) / (A + B)" + }, + "input": { + "shape": [ + -1, + 1, + 64, + 64 + ], + "dim_order": [ + "batch", + "ndvi", + "height", + "width" + ], + "data_type": "uint16" + } + } + ], + "mlm:output": [ + { + "name": "vegetation-segmentation", + "tasks": [ + "semantic-segmentation" + ], + "result": { + "shape": [ + -1, + 1 + ], + "dim_order": [ + "batch", + "class" + ], + "data_type": "uint8" + }, + "classification_classes": [ + { + "value": 0, + "name": "NON_VEGETATION", + "description": "background pixels", + "color_hint": null + }, + { + "value": 1, + "name": "VEGETATION", + "description": "pixels where vegetation was detected", + "color_hint": [0, 255, 0] + } + ], + "post_processing_function": null + }, + { + "name": "inverse-mask", + "tasks": [ + "semantic-segmentation" + ], + "result": { + "shape": [ + -1, + 1 + ], + "dim_order": [ + "batch", + "class" + ], + "data_type": "uint8" + }, + "classification_classes": [ + { + "value": 0, + "name": "NON_VEGETATION", + "description": "background pixels", + "color_hint": [255, 255, 255] + }, + { + "value": 1, + "name": "VEGETATION", + "description": "pixels where vegetation was detected", + "color_hint": [0, 0, 0] + } + ], + 
"post_processing_function": { + "format": "gdal-calc", + "expression": "logical_not(A)" + } + } + ], + "raster:bands": [ + { + "name": "B02 - blue", + "nodata": 0, + "data_type": "uint16", + "bits_per_sample": 15, + "spatial_resolution": 10, + "scale": 0.0001, + "offset": 0, + "unit": "m" + }, + { + "name": "B03 - green", + "nodata": 0, + "data_type": "uint16", + "bits_per_sample": 15, + "spatial_resolution": 10, + "scale": 0.0001, + "offset": 0, + "unit": "m" + }, + { + "name": "B04 - red", + "nodata": 0, + "data_type": "uint16", + "bits_per_sample": 15, + "spatial_resolution": 10, + "scale": 0.0001, + "offset": 0, + "unit": "m" + }, + { + "name": "B08 - nir", + "nodata": 0, + "data_type": "uint16", + "bits_per_sample": 15, + "spatial_resolution": 10, + "scale": 0.0001, + "offset": 0, + "unit": "m" + } + ] + }, + "assets": { + "weights": { + "href": "https://huggingface.co/torchgeo/resnet50_sentinel2_rgb_moco/blob/main/resnet50_sentinel2_rgb_moco.pth", + "title": "Pytorch weights checkpoint", + "description": "A Resnet-50 classification model trained on Sentinel-2 RGB imagery with torchgeo.", + "type": "application/octet-stream; application=pytorch", + "roles": [ + "mlm:model", + "mlm:weights" + ] + } + } +} diff --git a/examples/example.json b/examples/item_raster_bands.json similarity index 86% rename from examples/example.json rename to examples/item_raster_bands.json index cb3a41a..1514819 100644 --- a/examples/example.json +++ b/examples/item_raster_bands.json @@ -50,6 +50,7 @@ "mlm:tasks": [ "classification" ], + "mlm:architecture": "ResNet", "mlm:framework": "pytorch", "mlm:framework_version": "2.1.2+cu121", "file:size": 43000000, @@ -59,7 +60,7 @@ "mlm:accelerator": "cuda", "mlm:accelerator_constrained": false, "mlm:accelerator_summary": "Unknown", - "mlm:batch_size_suggestion": null, + "mlm:batch_size_suggestion": 256, "mlm:input": [ { "name": "13 Band Sentinel-2 Batch", @@ -93,47 +94,8 @@ ], "data_type": "float32" }, - "norm_by_channel": true, - 
"norm_type": "z-score", + "norm_type": null, "resize_type": null, - "parameters": null, - "statistics": { - "minimum": null, - "maximum": null, - "mean": [ - 1354.40546513, - 1118.24399958, - 1042.92983953, - 947.62620298, - 1199.47283961, - 1999.79090914, - 2369.22292565, - 2296.82608323, - 732.08340178, - 12.11327804, - 1819.01027855, - 1118.92391149, - 2594.14080798 - ], - "stddev": [ - 245.71762908, - 333.00778264, - 395.09249139, - 593.75055589, - 566.4170017, - 861.18399006, - 1086.63139075, - 1117.98170791, - 404.91978886, - 4.77584468, - 1002.58768311, - 761.30323499, - 1231.58581042 - ], - "count": null, - "valid_percent": null - }, - "norm_with_clip_values": null, "pre_processing_function": { "format": "python", "expression": "torchgeo.datamodules.eurosat.EuroSATDataModule.collate_fn" @@ -146,19 +108,17 @@ "tasks": [ "classification" ], - "result": [ - { - "shape": [ - -1, - 10 - ], - "dim_order": [ - "batch", - "class" - ], - "data_type": "float32" - } - ], + "result": { + "shape": [ + -1, + 10 + ], + "dim_order": [ + "batch", + "class" + ], + "data_type": "float32" + }, "classification_classes": [ { "value": 0, diff --git a/json-schema/schema.json b/json-schema/schema.json index af3af2a..014e0a5 100644 --- a/json-schema/schema.json +++ b/json-schema/schema.json @@ -24,7 +24,6 @@ "required": [ "mlm:name", "mlm:architecture", - "mlm:framework", "mlm:tasks", "mlm:input", "mlm:output" @@ -208,7 +207,7 @@ }, "mlm:name": { "type": "string", - "pattern": "^[a-zA-Z][a-zA-Z0-9_.-]+[a-zA-Z0-9]$" + "pattern": "^[a-zA-Z][a-zA-Z0-9_.\\-\\s]+[a-zA-Z0-9]$" }, "mlm:architecture": { "type": "string", @@ -605,16 +604,12 @@ "$ref": "https://stac-extensions.github.io/raster/v1.1.0/schema.json#/definitions/bands/items/properties/data_type" }, "AssetModelRole": { - "required": ["assets"], + "required": ["roles"], "properties": { - "assets": { - "additionalProperties": { - "required": ["roles"], - "properties": { - "roles": { - "contains": "mlm:model" - } - } + "roles": { + 
"contains": { + "type": "string", + "const": "mlm:model" } } } @@ -631,7 +626,7 @@ }, { "$comment": "However, if any band is indicated, a 'bands'-compliant section should describe them.", - "$ref": "#/$defs/AnyBandsRef" + "FIXME_$ref": "#/$defs/AnyBandsRef" } ] }, @@ -658,10 +653,10 @@ "properties": { "bands": { "type": "array", + "minItems": 1, "items": { "type": "string", - "$comment": "This 'minItems' is the purpose of this whole 'if/then' block.", - "minItems": 1 + "$comment": "This 'minItems' is the purpose of this whole 'if/then' block." } } } @@ -756,6 +751,40 @@ ] } ] + }, + "else": { + "$comment": "This is the JSON-object 'properties' definition.", + "properties": { + "$comment": "This is the STAC-Item 'properties' field.", + "properties": { + "required": [ + "mlm:input" + ], + "$comment": "This is the JSON-object 'properties' definition for the STAC Item 'properties' field.", + "properties": { + "$comment": "Required MLM bands listing referring to at least one band name.", + "mlm:input": { + "type": "array", + "items": { + "$comment": "This is the 'Model Input Object' properties.", + "properties": { + "bands": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "array", + "maxItems": 0 + } + ] + } + } + } + } + } + } + } } } } diff --git a/stac_model/input.py b/stac_model/input.py index 107fc5c..680c603 100644 --- a/stac_model/input.py +++ b/stac_model/input.py @@ -57,7 +57,7 @@ class Band(BaseModel): class ModelInput(BaseModel): name: str - bands: List[str] + bands: List[str] # order is critical here (same index as dim shape), allow duplicate if the model needs it somehow input: InputArray norm_by_channel: bool = None norm_type: NormalizeType = None diff --git a/stac_model/runtime.py b/stac_model/runtime.py index 1c0491f..c0a685b 100644 --- a/stac_model/runtime.py +++ b/stac_model/runtime.py @@ -41,11 +41,11 @@ def __str__(self): class Runtime(BaseModel): - framework: str - framework_version: str - file_size: int = Field(alias="file:size") - 
memory_size: int - batch_size_suggestion: Optional[int] = None + framework: str = Field(default="", exclude_defaults=True, exclude_unset=True) + framework_version: str = Field(default="", exclude_defaults=True, exclude_unset=True) + file_size: int = Field(alias="file:size", default=0, exclude_defaults=True, exclude_unset=True) + memory_size: int = Field(default=0, exclude_defaults=True, exclude_unset=True) + batch_size_suggestion: Optional[int] = Field(default=None, exclude_defaults=True, exclude_unset=True) accelerator: Optional[AcceleratorEnum] = Field(exclude_unset=True, default=None) accelerator_constrained: bool = Field(exclude_unset=True, default=False) diff --git a/tests/test_schema.py b/tests/test_schema.py index 61f717a..b21b0e2 100644 --- a/tests/test_schema.py +++ b/tests/test_schema.py @@ -8,8 +8,10 @@ @pytest.mark.parametrize( "mlm_example", # value passed to 'mlm_example' fixture [ - "example.json", - "example_eo_bands.json", + "item_basic.json", + "item_raster_bands.json", + "item_eo_bands.json", + "item_multi_io.json", ], indirect=True, ) From d111678e45cd02095cdfffb102562e3061ed5852 Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Thu, 4 Apr 2024 22:37:32 -0400 Subject: [PATCH 087/112] adjust pydantic eurosat_example with json-schema fields --- stac_model/examples.py | 51 ++++++++++++++++++++++++++++-------------- stac_model/runtime.py | 17 ++++++++++++-- tests/test_schema.py | 6 +++++ 3 files changed, 55 insertions(+), 19 deletions(-) diff --git a/stac_model/examples.py b/stac_model/examples.py index 9747086..aaeefa5 100644 --- a/stac_model/examples.py +++ b/stac_model/examples.py @@ -1,6 +1,9 @@ import pystac import json import shapely +from dateutil.parser import parse as parse_dt +from pystac import media_type + from stac_model.base import ProcessingExpression from stac_model.input import ModelInput from stac_model.output import ModelOutput, ModelResult @@ -86,20 +89,6 @@ def eurosat_resnet() -> MLModelExtension[pystac.Item]: 
expression="torchgeo.datamodules.eurosat.EuroSATDataModule.collate_fn" ), # noqa: E501 ) - # runtime = Runtime( - # framework="torch", - # version="2.1.2+cu121", - # asset=Asset(title = "Pytorch weights checkpoint", description="A Resnet-18 classification model trained on normalized Sentinel-2 imagery with Eurosat landcover labels with torchgeo", # noqa: E501 - # type=".pth", roles=["weights"], href="https://huggingface.co/torchgeo/resnet18_sentinel2_all_moco/resolve/main/resnet18_sentinel2_all_moco-59bfdff9.pth" # noqa: E501 - # ), - # source_code=Asset( - # href="https://github.com/microsoft/torchgeo/blob/61efd2e2c4df7ebe3bd03002ebbaeaa3cfe9885a/torchgeo/models/resnet.py#L207" # noqa: E501 - # ), - # accelerator="cuda", - # accelerator_constrained=False, - # hardware_summary="Unknown", - # commit_hash="61efd2e2c4df7ebe3bd03002ebbaeaa3cfe9885a", - # ) result_array = ModelResult( shape=[-1, 10], dim_order=["batch", "class"], @@ -128,11 +117,38 @@ def eurosat_resnet() -> MLModelExtension[pystac.Item]: result=result_array, post_processing_function=None, ) + assets = { + "model": pystac.Asset( + title="Pytorch weights checkpoint", + description=( + "A Resnet-18 classification model trained on normalized Sentinel-2 " + "imagery with Eurosat landcover labels with torchgeo." 
+ ), + href="https://huggingface.co/torchgeo/resnet18_sentinel2_all_moco/resolve/main/resnet18_sentinel2_all_moco-59bfdff9.pth", + media_type="application/octet-stream; application=pytorch", + roles=[ + "mlm:model", + "mlm:weights", + "data" + ] + ), + "source_code": pystac.Asset( + href="https://github.com/microsoft/torchgeo/blob/61efd2e2c4df7ebe3bd03002ebbaeaa3cfe9885a/torchgeo/models/resnet.py#L207", + media_type="text/x-python", + roles=[ + "mlm:model", + "code" + ] + ) + } ml_model_meta = MLModelProperties( name="Resnet-18 Sentinel-2 ALL MOCO", tasks={"classification"}, framework="pytorch", framework_version="2.1.2+cu121", + accelerator="cuda", + accelerator_constrained=False, + accelerator_summary="Unknown", file_size=43000000, memory_size=1, pretrained_source="EuroSat Sentinel-2", @@ -144,7 +160,7 @@ def eurosat_resnet() -> MLModelExtension[pystac.Item]: # in docs. start_datetime=datetime.strptime("1900-01-01", "%Y-%m-%d") # Is this a problem that we don't do date validation if we supply as str? start_datetime = "1900-01-01" - end_datetime = None + end_datetime = "9999-01-01" # cannot be None, invalid against STAC Core! 
bbox = [ -7.882190080512502, 37.13739173208318, @@ -159,13 +175,14 @@ def eurosat_resnet() -> MLModelExtension[pystac.Item]: bbox=bbox, datetime=None, properties={ - "start_datetime": start_datetime, - "end_datetime": end_datetime, + "start_datetime": parse_dt(start_datetime).isoformat() + "Z", + "end_datetime": parse_dt(end_datetime).isoformat() + "Z", "description": ( "Sourced from torchgeo python library," "identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO" ), }, + assets=assets, ) item.add_derived_from( "https://earth-search.aws.element84.com/v1/collections/sentinel-2-l2a" diff --git a/stac_model/runtime.py b/stac_model/runtime.py index c0a685b..bf38313 100644 --- a/stac_model/runtime.py +++ b/stac_model/runtime.py @@ -1,5 +1,5 @@ from enum import Enum -from typing import List, Optional +from typing import List, Literal, Optional, Union from pydantic import AnyUrl, BaseModel, ConfigDict, FilePath, Field @@ -40,6 +40,19 @@ def __str__(self): return self.value +AcceleratorName = Literal[ + "amd64", + "cuda", + "xla", + "amd-rocm", + "intel-ipex-cpu", + "intel-ipex-gpu", + "macos-arm", +] + +AcceleratorType = Union[AcceleratorName, AcceleratorEnum] + + class Runtime(BaseModel): framework: str = Field(default="", exclude_defaults=True, exclude_unset=True) framework_version: str = Field(default="", exclude_defaults=True, exclude_unset=True) @@ -47,7 +60,7 @@ class Runtime(BaseModel): memory_size: int = Field(default=0, exclude_defaults=True, exclude_unset=True) batch_size_suggestion: Optional[int] = Field(default=None, exclude_defaults=True, exclude_unset=True) - accelerator: Optional[AcceleratorEnum] = Field(exclude_unset=True, default=None) + accelerator: Optional[AcceleratorType] = Field(exclude_unset=True, default=None) accelerator_constrained: bool = Field(exclude_unset=True, default=False) accelerator_summary: str = Field(exclude_unset=True, exclude_defaults=True, default="") accelerator_count: int = Field(minimum=1, exclude_unset=True, exclude_defaults=True, 
default=-1) diff --git a/tests/test_schema.py b/tests/test_schema.py index b21b0e2..e720cc7 100644 --- a/tests/test_schema.py +++ b/tests/test_schema.py @@ -31,3 +31,9 @@ def test_model_metadata_to_dict(eurosat_resnet): def test_validate_model_metadata(eurosat_resnet): assert pystac.read_dict(eurosat_resnet.item.to_dict()) + + +def test_validate_model_against_schema(eurosat_resnet, mlm_validator): + mlm_item = pystac.read_dict(eurosat_resnet.item.to_dict()) + validated = pystac.validation.validate(mlm_item, validator=mlm_validator) + assert SCHEMA_URI in validated From 4d57e4156f182a7f1253514bfc796dfeddd190c5 Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Fri, 5 Apr 2024 00:27:20 -0400 Subject: [PATCH 088/112] fix pydantic drop unset fields as intended --- json-schema/schema.json | 6 +- stac_model/base.py | 39 +++++++++++- stac_model/examples.py | 130 +++++++++++++++++++++------------------- stac_model/input.py | 51 +++++++--------- stac_model/output.py | 16 +++-- stac_model/runtime.py | 47 ++++----------- stac_model/schema.py | 19 +++--- 7 files changed, 165 insertions(+), 143 deletions(-) diff --git a/json-schema/schema.json b/json-schema/schema.json index 014e0a5..db1b466 100644 --- a/json-schema/schema.json +++ b/json-schema/schema.json @@ -588,7 +588,11 @@ }, "InputStatistics": { "$comment": "MLM statistics for the specific input relevant for normalization for ML features.", - "$ref": "https://stac-extensions.github.io/raster/v1.1.0/schema.json#/definitions/bands/items/properties/statistics" + "type": "array", + "minItems": 1, + "items": { + "$ref": "https://stac-extensions.github.io/raster/v1.1.0/schema.json#/definitions/bands/items/properties/statistics" + } }, "ProcessingExpression": { "oneOf": [ diff --git a/stac_model/base.py b/stac_model/base.py index 4f4235d..ebbedb0 100644 --- a/stac_model/base.py +++ b/stac_model/base.py @@ -1,7 +1,44 @@ +from dataclasses import dataclass from enum import Enum from typing import Any, Literal, Union, 
TypeAlias -from pydantic import BaseModel +from pydantic import BaseModel, model_serializer + + +@dataclass +class _OmitIfNone: + pass + + +OmitIfNone = _OmitIfNone() + + +class MLMBaseModel(BaseModel): + """ + Allows wrapping any field with an annotation to drop it entirely if unset. + + ``` + field: Annotated[Optional[], OmitIfNone] = None + # or + field: Annotated[, OmitIfNone] = None + # or + field: Annotated[, OmitIfNone] = Field(default=None) + ``` + + It is important to use `MLMBaseModel`, otherwise the serializer will not be called and applied. + """ + @model_serializer + def model_serialize(self): + omit_if_none_fields = { + key: field + for key, field in self.model_fields.items() + if any(isinstance(m, _OmitIfNone) for m in field.metadata) + } + values = { + self.__fields__[key].alias or key: val # use the alias if specified + for key, val in self if key not in omit_if_none_fields or val is not None + } + return values DataType: TypeAlias = Literal[ diff --git a/stac_model/examples.py b/stac_model/examples.py index aaeefa5..ca68949 100644 --- a/stac_model/examples.py +++ b/stac_model/examples.py @@ -2,24 +2,18 @@ import json import shapely from dateutil.parser import parse as parse_dt -from pystac import media_type +from typing import cast + +from pystac.extensions.file import FileExtension from stac_model.base import ProcessingExpression -from stac_model.input import ModelInput -from stac_model.output import ModelOutput, ModelResult -from stac_model.schema import ( - Asset, - InputArray, - MLMClassification, - MLModelExtension, - MLModelProperties, - Runtime, - Statistics, -) +from stac_model.input import ModelInput, InputStructure, MLMStatistic +from stac_model.output import ModelOutput, ModelResult, MLMClassification +from stac_model.schema import MLModelExtension, MLModelProperties def eurosat_resnet() -> MLModelExtension[pystac.Item]: - input_array = InputArray( + input_array = InputStructure( shape=[-1, 13, 64, 64], dim_order=[ "batch", @@ -29,53 
+23,56 @@ def eurosat_resnet() -> MLModelExtension[pystac.Item]: ], data_type="float32", ) - band_names = [ - "B01", - "B02", - "B03", - "B04", - "B05", - "B06", - "B07", - "B08", - "B8A", - "B09", - "B10", - "B11", - "B12", + band_names = [] + # band_names = [ + # "B01", + # "B02", + # "B03", + # "B04", + # "B05", + # "B06", + # "B07", + # "B08", + # "B8A", + # "B09", + # "B10", + # "B11", + # "B12", + # ] + stats_mean = [ + 1354.40546513, + 1118.24399958, + 1042.92983953, + 947.62620298, + 1199.47283961, + 1999.79090914, + 2369.22292565, + 2296.82608323, + 732.08340178, + 12.11327804, + 1819.01027855, + 1118.92391149, + 2594.14080798, + ] + stats_stddev = [ + 245.71762908, + 333.00778264, + 395.09249139, + 593.75055589, + 566.4170017, + 861.18399006, + 1086.63139075, + 1117.98170791, + 404.91978886, + 4.77584468, + 1002.58768311, + 761.30323499, + 1231.58581042, + ] + stats = [ + MLMStatistic(mean=mean, stddev=stddev) + for mean, stddev in zip(stats_mean, stats_stddev) ] - stats = Statistics( - mean=[ - 1354.40546513, - 1118.24399958, - 1042.92983953, - 947.62620298, - 1199.47283961, - 1999.79090914, - 2369.22292565, - 2296.82608323, - 732.08340178, - 12.11327804, - 1819.01027855, - 1118.92391149, - 2594.14080798, - ], - stddev=[ - 245.71762908, - 333.00778264, - 395.09249139, - 593.75055589, - 566.4170017, - 861.18399006, - 1086.63139075, - 1117.98170791, - 404.91978886, - 4.77584468, - 1002.58768311, - 761.30323499, - 1231.58581042, - ], - ) input = ModelInput( name="13 Band Sentinel-2 Batch", bands=band_names, @@ -141,16 +138,20 @@ def eurosat_resnet() -> MLModelExtension[pystac.Item]: ] ) } + + ml_model_size = 43000000 ml_model_meta = MLModelProperties( name="Resnet-18 Sentinel-2 ALL MOCO", + architecture="ResNet-18", tasks={"classification"}, framework="pytorch", framework_version="2.1.2+cu121", accelerator="cuda", accelerator_constrained=False, accelerator_summary="Unknown", - file_size=43000000, + file_size=ml_model_size, memory_size=1, + pretrained=True, 
pretrained_source="EuroSat Sentinel-2", total_parameters=11_700_000, input=[input], @@ -187,6 +188,13 @@ def eurosat_resnet() -> MLModelExtension[pystac.Item]: item.add_derived_from( "https://earth-search.aws.element84.com/v1/collections/sentinel-2-l2a" ) + + model_asset = cast( + FileExtension[pystac.Asset], + pystac.extensions.file.FileExtension.ext(assets["model"], add_if_missing=True) + ) + model_asset.apply(size=ml_model_size) + item_mlm = MLModelExtension.ext(item, add_if_missing=True) - item_mlm.apply(ml_model_meta.model_dump(by_alias=True)) + item_mlm.apply(ml_model_meta.model_dump(by_alias=True, exclude_unset=True, exclude_defaults=True)) return item_mlm diff --git a/stac_model/input.py b/stac_model/input.py index 680c603..07b2e64 100644 --- a/stac_model/input.py +++ b/stac_model/input.py @@ -1,31 +1,26 @@ -from typing import Any, List, Literal, Optional, Set, TypeAlias, Union +from typing import Any, Annotated, List, Literal, Optional, Set, TypeAlias, Union -from pydantic import BaseModel, Field +from pystac.extensions.raster import Statistics +from pydantic import ConfigDict, Field, model_serializer -from stac_model.base import DataType, ProcessingExpression +from stac_model.base import DataType, MLMBaseModel, ProcessingExpression, OmitIfNone - -class InputArray(BaseModel): - shape: List[Union[int, float]] = Field(..., min_items=1) - dim_order: List[str] = Field(..., min_items=1) - data_type: DataType +Number: TypeAlias = Union[int, float] -class Statistics(BaseModel): - minimum: Optional[List[Union[float, int]]] = None - maximum: Optional[List[Union[float, int]]] = None - mean: Optional[List[float]] = None - stddev: Optional[List[float]] = None - count: Optional[List[int]] = None - valid_percent: Optional[List[float]] = None +class InputStructure(MLMBaseModel): + shape: List[Union[int, float]] = Field(min_items=1) + dim_order: List[str] = Field(min_items=1) + data_type: DataType -class Band(BaseModel): - name: str - description: Optional[str] = None - 
nodata: Union[float, int, str] - data_type: str - unit: Optional[str] = None +class MLMStatistic(MLMBaseModel): # FIXME: add 'Statistics' dep from raster extension (cases required to be triggered) + minimum: Annotated[Optional[Number], OmitIfNone] = None + maximum: Annotated[Optional[Number], OmitIfNone] = None + mean: Annotated[Optional[Number], OmitIfNone] = None + stddev: Annotated[Optional[Number], OmitIfNone] = None + count: Annotated[Optional[int], OmitIfNone] = None + valid_percent: Annotated[Optional[Number], OmitIfNone] = None NormalizeType: TypeAlias = Optional[Literal[ @@ -55,13 +50,13 @@ class Band(BaseModel): ]] -class ModelInput(BaseModel): +class ModelInput(MLMBaseModel): name: str bands: List[str] # order is critical here (same index as dim shape), allow duplicate if the model needs it somehow - input: InputArray - norm_by_channel: bool = None - norm_type: NormalizeType = None - norm_clip: Optional[List[Union[float, int]]] = None - resize_type: ResizeType = None - statistics: Optional[Union[Statistics, List[Statistics]]] = None + input: InputStructure + norm_by_channel: Annotated[bool, OmitIfNone] = None + norm_type: Annotated[NormalizeType, OmitIfNone] = None + norm_clip: Annotated[List[Union[float, int]], OmitIfNone] = None + resize_type: Annotated[ResizeType, OmitIfNone] = None + statistics: Annotated[List[MLMStatistic], OmitIfNone] = None pre_processing_function: Optional[ProcessingExpression] = None diff --git a/stac_model/output.py b/stac_model/output.py index f08cbb2..f6c6933 100644 --- a/stac_model/output.py +++ b/stac_model/output.py @@ -2,12 +2,12 @@ from typing_extensions import NotRequired, TypedDict from pystac.extensions.classification import Classification -from pydantic import AliasChoices, BaseModel, ConfigDict, Field, PlainSerializer, model_serializer +from pydantic import AliasChoices, ConfigDict, Field, PlainSerializer, model_serializer -from stac_model.base import DataType, ModelTask, ProcessingExpression +from stac_model.base 
import DataType, MLMBaseModel, ModelTask, ProcessingExpression, OmitIfNone -class ModelResult(BaseModel): +class ModelResult(MLMBaseModel): shape: List[Union[int, float]] = Field(..., min_items=1) dim_order: List[str] = Field(..., min_items=1) data_type: DataType @@ -31,7 +31,7 @@ class ModelResult(BaseModel): # ] -class MLMClassification(BaseModel, Classification): +class MLMClassification(MLMBaseModel, Classification): @model_serializer() def model_dump(self, *_, **__) -> Dict[str, Any]: return self.to_dict() @@ -60,7 +60,7 @@ def __setattr__(self, key: str, value: Any) -> None: if key == "properties": Classification.__setattr__(self, key, value) else: - BaseModel.__setattr__(self, key, value) + MLMBaseModel.__setattr__(self, key, value) model_config = ConfigDict(arbitrary_types_allowed=True) @@ -73,7 +73,7 @@ def __setattr__(self, key: str, value: Any) -> None: # nodata: Optional[bool] = False -class ModelOutput(BaseModel): +class ModelOutput(MLMBaseModel): name: str tasks: Set[ModelTask] result: ModelResult @@ -83,11 +83,9 @@ class ModelOutput(BaseModel): # it is more important to keep the order in this case, # which we would lose with 'Set'. # We also get some unhashable errors with 'Set', although 'MLMClassification' implements '__hash__'. 
- classes: List[MLMClassification] = Field( + classes: Annotated[List[MLMClassification], OmitIfNone] = Field( alias="classification:classes", validation_alias=AliasChoices("classification:classes", "classification_classes"), - exclude_unset=True, - exclude_defaults=True ) post_processing_function: Optional[ProcessingExpression] = None diff --git a/stac_model/runtime.py b/stac_model/runtime.py index bf38313..12e989b 100644 --- a/stac_model/runtime.py +++ b/stac_model/runtime.py @@ -1,30 +1,9 @@ from enum import Enum -from typing import List, Literal, Optional, Union +from typing import Annotated, Literal, Optional, Union -from pydantic import AnyUrl, BaseModel, ConfigDict, FilePath, Field +from pydantic import Field - -class Asset(BaseModel): - """Information about the model location and other additional file locations. - Follows the STAC Asset Object spec. - """ - - href: FilePath | AnyUrl | str - title: Optional[str] = None - description: Optional[str] = None - type: Optional[str] = None - roles: Optional[List[str]] = None - - model_config = ConfigDict(arbitrary_types_allowed=True) - - -class Container(BaseModel): - container_file: str - image_name: str - tag: str - working_dir: str - run: str - accelerator: bool +from stac_model.base import MLMBaseModel, OmitIfNone class AcceleratorEnum(str, Enum): @@ -53,14 +32,14 @@ def __str__(self): AcceleratorType = Union[AcceleratorName, AcceleratorEnum] -class Runtime(BaseModel): - framework: str = Field(default="", exclude_defaults=True, exclude_unset=True) - framework_version: str = Field(default="", exclude_defaults=True, exclude_unset=True) - file_size: int = Field(alias="file:size", default=0, exclude_defaults=True, exclude_unset=True) - memory_size: int = Field(default=0, exclude_defaults=True, exclude_unset=True) - batch_size_suggestion: Optional[int] = Field(default=None, exclude_defaults=True, exclude_unset=True) +class Runtime(MLMBaseModel): + framework: Annotated[str, OmitIfNone] = Field(default=None) + 
framework_version: Annotated[str, OmitIfNone] = Field(default=None) + file_size: Annotated[int, OmitIfNone] = Field(alias="file:size", default=None) + memory_size: Annotated[int, OmitIfNone] = Field(default=None) + batch_size_suggestion: Annotated[int, OmitIfNone] = Field(default=None) - accelerator: Optional[AcceleratorType] = Field(exclude_unset=True, default=None) - accelerator_constrained: bool = Field(exclude_unset=True, default=False) - accelerator_summary: str = Field(exclude_unset=True, exclude_defaults=True, default="") - accelerator_count: int = Field(minimum=1, exclude_unset=True, exclude_defaults=True, default=-1) + accelerator: Optional[AcceleratorType] = Field(default=None) + accelerator_constrained: bool = Field(default=False) + accelerator_summary: Annotated[str, OmitIfNone] = Field(default=None) + accelerator_count: Annotated[int, OmitIfNone] = Field(default=None, minimum=1) diff --git a/stac_model/schema.py b/stac_model/schema.py index 13f13b9..b7e4cc3 100644 --- a/stac_model/schema.py +++ b/stac_model/schema.py @@ -1,7 +1,7 @@ import json from typing import ( Any, - Dict, + Annotated, Generic, Iterable, List, @@ -15,7 +15,7 @@ ) import pystac -from pydantic import BaseModel, ConfigDict, Field +from pydantic import ConfigDict, Field from pydantic.fields import FieldInfo from pystac.extensions import item_assets from pystac.extensions.base import ( @@ -25,10 +25,10 @@ SummariesExtension, ) -from stac_model.base import DataType, ModelTask -from stac_model.input import Band, InputArray, ModelInput, Statistics -from stac_model.output import MLMClassification, ModelOutput -from stac_model.runtime import Asset, Container, Runtime +from stac_model.base import ModelTask, OmitIfNone +from stac_model.input import ModelInput +from stac_model.output import ModelOutput +from stac_model.runtime import Runtime T = TypeVar( "T", pystac.Collection, pystac.Item, pystac.Asset, item_assets.AssetDefinition @@ -44,14 +44,15 @@ def mlm_prefix_adder(field_name: str) -> 
str: class MLModelProperties(Runtime): - name: str + name: str = Field(min_length=1) + architecture: str = Field(min_length=1) tasks: Set[ModelTask] input: List[ModelInput] output: List[ModelOutput] total_parameters: int - pretrained: bool = Field(exclude_unset=True, default=True) - pretrained_source: Optional[str] = Field(exclude_unset=True) + pretrained: Annotated[Optional[bool], OmitIfNone] = Field(default=True) + pretrained_source: Annotated[Optional[str], OmitIfNone] = None model_config = ConfigDict( alias_generator=mlm_prefix_adder, From 4eb30dab98617dfee4cc6b1e5dbc1d50d4770779 Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Fri, 5 Apr 2024 00:31:55 -0400 Subject: [PATCH 089/112] add OmitIfNone reference code --- stac_model/base.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stac_model/base.py b/stac_model/base.py index ebbedb0..ec82721 100644 --- a/stac_model/base.py +++ b/stac_model/base.py @@ -17,7 +17,7 @@ class MLMBaseModel(BaseModel): """ Allows wrapping any field with an annotation to drop it entirely if unset. - ``` + ```python field: Annotated[Optional[], OmitIfNone] = None # or field: Annotated[, OmitIfNone] = None @@ -26,6 +26,8 @@ class MLMBaseModel(BaseModel): ``` It is important to use `MLMBaseModel`, otherwise the serializer will not be called and applied. 
+ + Reference: https://github.com/pydantic/pydantic/discussions/5461#discussioncomment-7503283 """ @model_serializer def model_serialize(self): From 21557450c71cc6e71999541068f3cda8528b780e Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Fri, 5 Apr 2024 11:18:54 -0400 Subject: [PATCH 090/112] fix invalid raster/eo bands/statistics definitions in examples --- examples/item_eo_bands.json | 167 +++++++++++++++++++------------- examples/item_raster_bands.json | 25 ++++- stac_model/examples.py | 31 +++--- 3 files changed, 133 insertions(+), 90 deletions(-) diff --git a/examples/item_eo_bands.json b/examples/item_eo_bands.json index 60a5868..0d1414a 100644 --- a/examples/item_eo_bands.json +++ b/examples/item_eo_bands.json @@ -98,44 +98,60 @@ "norm_by_channel": true, "norm_type": "z-score", "resize_type": null, - "parameters": null, - "statistics": { - "minimum": null, - "maximum": null, - "mean": [ - 1354.40546513, - 1118.24399958, - 1042.92983953, - 947.62620298, - 1199.47283961, - 1999.79090914, - 2369.22292565, - 2296.82608323, - 732.08340178, - 12.11327804, - 1819.01027855, - 1118.92391149, - 2594.14080798 - ], - "stddev": [ - 245.71762908, - 333.00778264, - 395.09249139, - 593.75055589, - 566.4170017, - 861.18399006, - 1086.63139075, - 1117.98170791, - 404.91978886, - 4.77584468, - 1002.58768311, - 761.30323499, - 1231.58581042 - ], - "count": null, - "valid_percent": null - }, - "norm_with_clip_values": null, + "statistics": [ + { + "mean": 1354.40546513, + "stddev": 245.71762908 + }, + { + "mean": 1118.24399958, + "stddev": 333.00778264 + }, + { + "mean": 1042.92983953, + "stddev": 395.09249139 + }, + { + "mean": 947.62620298, + "stddev": 593.75055589 + }, + { + "mean": 1199.47283961, + "stddev": 566.4170017 + }, + { + "mean": 1999.79090914, + "stddev": 861.18399006 + }, + { + "mean": 2369.22292565, + "stddev": 1086.63139075 + }, + { + "mean": 2296.82608323, + "stddev": 1117.98170791 + }, + { + "mean": 732.08340178, + "stddev": 404.91978886 + }, + 
{ + "mean": 12.11327804, + "stddev": 4.77584468 + }, + { + "mean": 1819.01027855, + "stddev": 1002.58768311 + }, + { + "mean": 1118.92391149, + "stddev": 761.30323499 + }, + { + "mean": 2594.14080798, + "stddev": 1231.58581042 + } + ], "pre_processing_function": { "format": "python", "expression": "torchgeo.datamodules.eurosat.EuroSATDataModule.collate_fn" @@ -148,19 +164,17 @@ "tasks": [ "classification" ], - "result": [ - { - "shape": [ - -1, - 10 - ], - "dim_order": [ - "batch", - "class" - ], - "data_type": "float32" - } - ], + "result": { + "shape": [ + -1, + 10 + ], + "dim_order": [ + "batch", + "class" + ], + "data_type": "float32" + }, "classification_classes": [ { "value": 0, @@ -248,91 +262,91 @@ ], "eo:bands": [ { - "name": "coastal", + "name": "B01", "common_name": "coastal", "description": "Coastal aerosol (band 1)", "center_wavelength": 0.443, "full_width_half_max": 0.027 }, { - "name": "blue", + "name": "B02", "common_name": "blue", "description": "Blue (band 2)", "center_wavelength": 0.49, "full_width_half_max": 0.098 }, { - "name": "green", + "name": "B03", "common_name": "green", "description": "Green (band 3)", "center_wavelength": 0.56, "full_width_half_max": 0.045 }, { - "name": "red", + "name": "B04", "common_name": "red", "description": "Red (band 4)", "center_wavelength": 0.665, "full_width_half_max": 0.038 }, { - "name": "rededge1", + "name": "B05", "common_name": "rededge", "description": "Red edge 1 (band 5)", "center_wavelength": 0.704, "full_width_half_max": 0.019 }, { - "name": "rededge2", + "name": "B06", "common_name": "rededge", "description": "Red edge 2 (band 6)", "center_wavelength": 0.74, "full_width_half_max": 0.018 }, { - "name": "rededge3", + "name": "B07", "common_name": "rededge", "description": "Red edge 3 (band 7)", "center_wavelength": 0.783, "full_width_half_max": 0.028 }, { - "name": "nir", + "name": "B08", "common_name": "nir", "description": "NIR 1 (band 8)", "center_wavelength": 0.842, "full_width_half_max": 0.145 
}, { - "name": "nir08", + "name": "B8A", "common_name": "nir08", "description": "NIR 2 (band 8A)", "center_wavelength": 0.865, "full_width_half_max": 0.033 }, { - "name": "nir09", + "name": "B09", "common_name": "nir09", "description": "NIR 3 (band 9)", "center_wavelength": 0.945, "full_width_half_max": 0.026 }, { - "name": "cirrus", + "name": "B10", "common_name": "cirrus", "description": "SWIR - Cirrus (band 10)", "center_wavelength": 1.375, "full_width_half_max": 0.026 }, { - "name": "swir16", + "name": "B11", "common_name": "swir16", "description": "SWIR 1 (band 11)", "center_wavelength": 1.61, "full_width_half_max": 0.143 }, { - "name": "swir22", + "name": "B12", "common_name": "swir22", "description": "SWIR 2 (band 12)", "center_wavelength": 2.19, @@ -341,13 +355,17 @@ ], "raster:bands": [ { - "name": "coastal", - "common_name": "coastal", - "description": "Coastal aerosol (band 1)", - "center_wavelength": 0.443, - "full_width_half_max": 0.027 + "name": "B01", + "nodata": 0, + "data_type": "uint16", + "bits_per_sample": 15, + "spatial_resolution": 60, + "scale": 0.0001, + "offset": 0, + "unit": "m" }, { + "name": "B02", "nodata": 0, "data_type": "uint16", "bits_per_sample": 15, @@ -357,6 +375,7 @@ "unit": "m" }, { + "name": "B03", "nodata": 0, "data_type": "uint16", "bits_per_sample": 15, @@ -366,6 +385,7 @@ "unit": "m" }, { + "name": "B04", "nodata": 0, "data_type": "uint16", "bits_per_sample": 15, @@ -375,6 +395,7 @@ "unit": "m" }, { + "name": "B05", "nodata": 0, "data_type": "uint16", "bits_per_sample": 15, @@ -384,6 +405,7 @@ "unit": "m" }, { + "name": "B06", "nodata": 0, "data_type": "uint16", "bits_per_sample": 15, @@ -393,6 +415,7 @@ "unit": "m" }, { + "name": "B07", "nodata": 0, "data_type": "uint16", "bits_per_sample": 15, @@ -402,6 +425,7 @@ "unit": "m" }, { + "name": "B08", "nodata": 0, "data_type": "uint16", "bits_per_sample": 15, @@ -411,6 +435,7 @@ "unit": "m" }, { + "name": "B8A", "nodata": 0, "data_type": "uint16", "bits_per_sample": 15, @@ 
-420,6 +445,7 @@ "unit": "m" }, { + "name": "B09", "nodata": 0, "data_type": "uint16", "bits_per_sample": 15, @@ -429,6 +455,7 @@ "unit": "m" }, { + "name": "B10", "nodata": 0, "data_type": "uint16", "bits_per_sample": 15, @@ -438,6 +465,7 @@ "unit": "m" }, { + "name": "B11", "nodata": 0, "data_type": "uint16", "bits_per_sample": 15, @@ -447,6 +475,7 @@ "unit": "m" }, { + "name": "B12", "nodata": 0, "data_type": "uint16", "bits_per_sample": 15, diff --git a/examples/item_raster_bands.json b/examples/item_raster_bands.json index 1514819..ed1d765 100644 --- a/examples/item_raster_bands.json +++ b/examples/item_raster_bands.json @@ -206,13 +206,17 @@ ], "raster:bands": [ { - "name": "coastal", - "common_name": "coastal", - "description": "Coastal aerosol (band 1)", - "center_wavelength": 0.443, - "full_width_half_max": 0.027 + "name": "B01", + "nodata": 0, + "data_type": "uint16", + "bits_per_sample": 15, + "spatial_resolution": 60, + "scale": 0.0001, + "offset": 0, + "unit": "m" }, { + "name": "B02", "nodata": 0, "data_type": "uint16", "bits_per_sample": 15, @@ -222,6 +226,7 @@ "unit": "m" }, { + "name": "B03", "nodata": 0, "data_type": "uint16", "bits_per_sample": 15, @@ -231,6 +236,7 @@ "unit": "m" }, { + "name": "B04", "nodata": 0, "data_type": "uint16", "bits_per_sample": 15, @@ -240,6 +246,7 @@ "unit": "m" }, { + "name": "B05", "nodata": 0, "data_type": "uint16", "bits_per_sample": 15, @@ -249,6 +256,7 @@ "unit": "m" }, { + "name": "B06", "nodata": 0, "data_type": "uint16", "bits_per_sample": 15, @@ -258,6 +266,7 @@ "unit": "m" }, { + "name": "B07", "nodata": 0, "data_type": "uint16", "bits_per_sample": 15, @@ -267,6 +276,7 @@ "unit": "m" }, { + "name": "B08", "nodata": 0, "data_type": "uint16", "bits_per_sample": 15, @@ -276,6 +286,7 @@ "unit": "m" }, { + "name": "B8A", "nodata": 0, "data_type": "uint16", "bits_per_sample": 15, @@ -285,6 +296,7 @@ "unit": "m" }, { + "name": "B09", "nodata": 0, "data_type": "uint16", "bits_per_sample": 15, @@ -294,6 +306,7 @@ 
"unit": "m" }, { + "name": "B10", "nodata": 0, "data_type": "uint16", "bits_per_sample": 15, @@ -303,6 +316,7 @@ "unit": "m" }, { + "name": "B11", "nodata": 0, "data_type": "uint16", "bits_per_sample": 15, @@ -312,6 +326,7 @@ "unit": "m" }, { + "name": "B12", "nodata": 0, "data_type": "uint16", "bits_per_sample": 15, diff --git a/stac_model/examples.py b/stac_model/examples.py index ca68949..667ea38 100644 --- a/stac_model/examples.py +++ b/stac_model/examples.py @@ -23,22 +23,21 @@ def eurosat_resnet() -> MLModelExtension[pystac.Item]: ], data_type="float32", ) - band_names = [] - # band_names = [ - # "B01", - # "B02", - # "B03", - # "B04", - # "B05", - # "B06", - # "B07", - # "B08", - # "B8A", - # "B09", - # "B10", - # "B11", - # "B12", - # ] + band_names = [ + "B01", + "B02", + "B03", + "B04", + "B05", + "B06", + "B07", + "B08", + "B8A", + "B09", + "B10", + "B11", + "B12", + ] stats_mean = [ 1354.40546513, 1118.24399958, From 9d14ac689227a8b4bee544a5b5e8b78b72419bc8 Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Tue, 9 Apr 2024 12:14:55 -0400 Subject: [PATCH 091/112] update schema title and description --- json-schema/schema.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/json-schema/schema.json b/json-schema/schema.json index db1b466..0c3722a 100644 --- a/json-schema/schema.json +++ b/json-schema/schema.json @@ -1,8 +1,8 @@ { "$schema": "http://json-schema.org/draft-07/schema#", "$id": "https://stac-extensions.github.io/mlm/v1.0.0/schema.json", - "title": "DL Model Item", - "description": "This object represents the metadata for a Machine Learning Model (MLM).", + "title": "Machine Learning Model STAC Extension Schema", + "description": "This object represents the metadata for a Machine Learning Model (MLM) used in STAC documents.", "oneOf": [ { "$comment": "This is the schema for STAC extension MLM in Items.", From afe0a9add15c199380cea76b87fd00b56b44a955 Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Tue, 9 Apr 
2024 11:22:26 -0700 Subject: [PATCH 092/112] remove out of date items from changelog --- CHANGELOG.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f191358..2f0a1d4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,7 +14,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - [`disk_size`, `memory_size`](./README#architecture-object) - [`hardware_summary`, `accelerator`, `accelerator_constrained`](./README#runtime-object) to specify hardware requirements for inference - Use common metadata [Asset Object](https://github.com/radiantearth/stac-spec/blob/master/collection-spec/collection-spec.md#asset-object) to refer to model asset and source code. -- flexible [class map object](./README.md#class-map-object) and [parameters object](./README.md#parameters-object) to handle aspects of models that vary substantially in number +- use `classification:classes` in Model Output - add `scene-classification` to the Enum Tasks to allow disambiguation between pixel-wise and patch-based classification ### Changed @@ -35,8 +35,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - any `dlm`-prefixed field or property ### Removed -- Data Object, replaced with [Model Input Object](./README.md#model-input-object) that uses the `name` field from - the [common metadata band object](https://github.com/radiantearth/stac-spec/blob/f9b3c59ba810541c9da70c5f8d39635f8cba7bcd/item-spec/common-metadata.md#bands) +- Data Object, replaced with [Model Input Object](./README.md#model-input-object) that uses the `name` field from + the [common metadata band object](https://github.com/radiantearth/stac-spec/blob/f9b3c59ba810541c9da70c5f8d39635f8cba7bcd/item-spec/common-metadata.md#bands) which also records `data_type` and `nodata` type # TODO link release here From 1fb5f21163302ad96b13d2e1e9888bea9ee20a0d Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Wed, 10 
Apr 2024 20:15:26 -0400 Subject: [PATCH 093/112] include PR recommended changes --- README.md | 102 ++++++++++++++++++++++++++-------------- json-schema/schema.json | 8 ++-- 2 files changed, 73 insertions(+), 37 deletions(-) diff --git a/README.md b/README.md index 78e4553..876c6c7 100644 --- a/README.md +++ b/README.md @@ -123,21 +123,21 @@ As a general rule of thumb, if a task is not represented below, an appropriate n definitions listed in [Papers With Code](https://paperswithcode.com/sota). The names should be normalized to lowercase and use hyphens instead of spaces. -| Task Name | Corresponding `label:tasks` | Description | -|-------------------------|-----------------------------|-----------------------------------------------------------------------------------------------------------------| -| `regression` | `regression` | Generic regression that estimates a numeric and continuous value. | -| `classification` | `classification` | Generic classification task that assigns class labels to an output. | -| `scene-classification` | *n/a* | Specific classification task where the model assigns a single class label to an entire scene/area. | -| `detection` | `detection` | Generic detection of the "presence" of objects or entities, with or without positions. | -| `object-detection` | *n/a* | Task corresponding to the identification of positions as bounding boxes of object detected in the scene. | -| `segmentation` | `segmentation` | Generic tasks that regroups all types of segmentations tasks consisting of applying labels to pixels. | -| `semantic-segmentation` | *n/a* | Specific segmentation task where all pixels are attributed labels, without consideration of similar instances. | -| `instance-segmentation` | *n/a* | Specific segmentation task that assigns distinct labels for groups of pixels corresponding to object instances. 
| -| `panoptic-segmentation` | *n/a* | Specific segmentation task that combines instance segmentation of objects and semantic labels for non-objects. | -| `similarity-search` | *n/a* | Generic task to identify whether a query input corresponds to another reference within a corpus. | -| `generative` | *n/a* | Generic task that encompasses all synthetic data generation techniques. | -| `image-captioning` | *n/a* | Specific task of describing the content of an image in words. | -| `super-resolution` | *n/a* | Specific task that increases the quality and resolution of an image by increasing its high-frequency details. | +| Task Name | Corresponding `label:tasks` | Description | +|-------------------------|-----------------------------|--------------------------------------------------------------------------------------------------------------------------| +| `regression` | `regression` | Generic regression that estimates a numeric and continuous value. | +| `classification` | `classification` | Generic classification task that assigns class labels to an output. | +| `scene-classification` | *n/a* | Specific classification task where the model assigns a single class label to an entire scene/area. | +| `detection` | `detection` | Generic detection of the "presence" of objects or entities, with or without positions. | +| `object-detection` | *n/a* | Task corresponding to the identification of positions as bounding boxes of object detected in the scene. | +| `segmentation` | `segmentation` | Generic tasks that regroups all types of segmentations tasks consisting of applying labels to pixels. | +| `semantic-segmentation` | *n/a* | Specific segmentation task where all pixels are attributed labels, without consideration for segments as unique objects. | +| `instance-segmentation` | *n/a* | Specific segmentation task that assigns distinct labels for groups of pixels corresponding to object instances. 
| +| `panoptic-segmentation` | *n/a* | Specific segmentation task that combines instance segmentation of objects and semantic labels for non-objects. | +| `similarity-search` | *n/a* | Generic task to identify whether a query input corresponds to another reference within a corpus. | +| `generative` | *n/a* | Generic task that encompasses all synthetic data generation techniques. | +| `image-captioning` | *n/a* | Specific task of describing the content of an image in words. | +| `super-resolution` | *n/a* | Specific task that increases the quality and resolution of an image by increasing its high-frequency details. | If the task falls within the category of supervised machine learning and uses labels during training, this should align with the `label:tasks` values defined in [STAC Label Extension][stac-ext-label-props] for relevant @@ -158,20 +158,22 @@ describe what the model accomplishes. ### Framework -In most cases, this should correspond to common library names of well-established ML frameworks. -No explicit "Enum" is defined to allow easy addition of newer frameworks, but it is recommended +This should correspond to the common library name of a well-established ML framework. +No "Enum" are *enforced* to allow easy addition of newer frameworks, but it is **STRONGLY** recommended to use common names when applicable. Below are a few notable entries. - `PyTorch` - `TensorFlow` - `Scikit-learn` - `Huggingface` -- `PyMC` -- `JAX` +- `Keras` - `ONNX` +- `rgee` +- `spatialRF` +- `JAX` - `MXNet` -- `Keras` - `Caffe` +- `PyMC` - `Weka` ### Accelerator Type Enum @@ -319,7 +321,7 @@ Select one option from: - `inf` - `clip` -See [OpenCV - Interpolation Flags](https://docs.opencv.org/4.x/da/d54/group__imgproc__transform.html#ga5bb5a1fea74ea38e1a5445ca803ff121) +See [OpenCV - Normalization Flags][opencv-normalization-flags] for details about the relevant methods. Equivalent methods from other packages are applicable as well. 
When a normalization technique is specified, it is expected that the corresponding [Statistics](#bands-and-statistics) @@ -331,6 +333,8 @@ If none of the above values applies, `null` (literal, not string) can be used in If a custom normalization operation, or a combination of operations (with or without [Resize](#resize-enum)), must be defined instead, consider using a [Processing Expression](#processing-expression) reference. +[opencv-normalization-flags]: https://docs.opencv.org/4.x/d2/de8/group__core__array.html#gad12cefbcb5291cf958a85b4b67b6149f + #### Resize Enum Select one option from: @@ -345,13 +349,15 @@ Select one option from: - `wrap-fill-outliers` - `wrap-inverse-map` -See [OpenCV - Normalization Flags](https://docs.opencv.org/4.x/d2/de8/group__core__array.html#ga87eef7ee3970f86906d69a92cbf064bd) +See [OpenCV - Interpolation Flags][opencv-interpolation-flags] for details about the relevant methods. Equivalent methods from other packages are applicable as well. If none of the above values applies, `null` (literal, not string) can be used instead. If a custom rescaling operation, or a combination of operations (with or without [Normalization](#normalize-enum)), must be defined instead, consider using a [Processing Expression](#processing-expression) reference. +[opencv-interpolation-flags]: https://docs.opencv.org/4.x/da/d54/group__imgproc__transform.html#ga5bb5a1fea74ea38e1a5445ca803ff121 + #### Processing Expression Taking inspiration from [Processing Extension - Expression Object][stac-proc-expr], the processing expression defines @@ -501,27 +507,55 @@ by comparison with fields `file:checksum` and `file:size` for example. #### Model Artifact Media-Type -Not all ML framework, libraries or model artifacts provide explicit media-type. When those are not provided, custom -media-types can be considered. 
For example `application/x-pytorch` or `application/octet-stream; application=pytorch` -could be appropriate to represent a PyTorch `.pt` file, since the underlying format is a serialized pickle structure. +Very few ML frameworks, libraries or model artifacts provide explicit [IANA registered][iana-media-type] media-type +to represent the contents they handle. When those are not provided, custom media-types can be considered. +However, "*unofficial but well-established*" parameters should be reused over custom media-types when possible. + +For example, the unofficial `application/octet-stream; framework=pytorch` definition is appropriate to represent a +PyTorch `.pt` file, since its underlying format is a serialized pickle structure, and its `framework` parameter +provides a clearer indication about the targeted ML framework and its contents. Since artifacts will typically be +downloaded using a request stream into a runtime environment in order to employ the model, +the `application/octet-stream` media-type is relevant for representing this type of arbitrary binary data. +Being an official media-type, it also has the benefit to increase chances that +HTTP clients will handle download of the contents appropriately when performing requests. In contrast, custom +media-types such as `application/x-pytorch` have higher chances to be considered unacceptable (HTTP 406 Not Acceptable) +by servers, which is why they should preferably be avoided. + +Users can consider adding more parameters to provide additional context, such as `profile=compiled` to provide an +additional hint that the specific [PyTorch Ahead-of-Time Compilation][pytorch-aot-inductor] profile +is used for the artifact described by the media-type. However, users need to remember that those parameters are not +official. 
In order to validate the specific framework and artifact type employed by the model, the MLM properties +`mlm:framework` (see [MLM Fields](#item-properties-and-collection-fields)) and +`mlm:artifact_type` (see [Model Asset](#model-asset)) should be employed instead to perform this validation if needed. + +[iana-media-type]: https://www.iana.org/assignments/media-types/media-types.xhtml #### Artifact Type Enum This value can be used to provide additional details about the specific model artifact being described. -For example, PyTorch offers various strategies for providing model definitions, such as Pickle (`.pt`), TorchScript, -or the compiled approach. Since they all refer to the same ML framework, -the [Model Artifact Media-Type](#model-artifact-media-type) would be insufficient in this case to detect with strategy -should be used. +For example, PyTorch offers [various strategies][pytorch-frameworks] for providing model definitions, +such as Pickle (`.pt`), [TorchScript][pytorch-jit-script], +or [PyTorch Ahead-of-Time Compilation][pytorch-aot-inductor] (`.pt2`) approach. +Since they all refer to the same ML framework, the [Model Artifact Media-Type](#model-artifact-media-type) +can be insufficient in this case to detect which strategy should be used with. Following are some proposed *Artifact Type* values for corresponding approaches, but other names are permitted as well. Note that the names are selected using the framework-specific definitions to help the users understand the source explicitly, although this is not strictly required either. -| Artifact Type | Description | -|--------------------|--------------------------------------------------------------------------------------------------------------------------| -| `torch.compile` | A model artifact obtained by [`torch.compile`](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html). 
| -| `torch.jit.script` | A model artifact obtained by [`TorchScript`](https://pytorch.org/docs/stable/jit.html). | -| `torch.save` | A model artifact saved by [Serialized Pickle Object](https://pytorch.org/tutorials/beginner/saving_loading_models.html). | +| Artifact Type | Description | +|--------------------|--------------------------------------------------------------------------------------| +| `torch.save` | A model artifact obtained by [Serialized Pickle Object][pytorch-save] (i.e.: `.pt`). | +| `torch.jit.script` | A model artifact obtained by [`TorchScript`][pytorch-jit-script]. | +| `torch.export` | A model artifact obtained by [`torch.export`][pytorch-export] (i.e.: `.pt2`). | +| `torch.compile` | A model artifact obtained by [`torch.compile`][pytorch-compile]. | + +[pytorch-compile]: https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html +[pytorch-export]: https://pytorch.org/docs/main/export.html +[pytorch-frameworks]: https://pytorch.org/docs/main/export.html#existing-frameworks +[pytorch-aot-inductor]: https://pytorch.org/docs/main/torch.compiler_aot_inductor.html +[pytorch-jit-script]: https://pytorch.org/docs/stable/jit.html +[pytorch-save]: https://pytorch.org/tutorials/beginner/saving_loading_models.html ### Source Code Asset diff --git a/json-schema/schema.json b/json-schema/schema.json index 0c3722a..952042b 100644 --- a/json-schema/schema.json +++ b/json-schema/schema.json @@ -232,12 +232,14 @@ "TensorFlow", "Scikit-learn", "Huggingface", - "PyMC", - "JAX", + "Keras", "ONNX", + "rgee", + "spatialRF", + "JAX", "MXNet", - "Keras", "Caffe", + "PyMC", "Weka" ] }, From f1bee682cbcaf99b3d199dfb98d2610bc3e6e091 Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Wed, 17 Apr 2024 14:36:04 -0400 Subject: [PATCH 094/112] fix github ci command to instlal poetry --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 
1775cfb..df63b26 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -19,7 +19,7 @@ jobs: python-version: ${{ matrix.python-version }} - name: Install poetry - run: make poetry-download + run: make poetry-install - name: Set up cache uses: actions/cache@v2.1.6 From 5ad13fa144eea2c7728c62fbcf56af468b9b670d Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Wed, 17 Apr 2024 14:44:35 -0400 Subject: [PATCH 095/112] update ci commands --- .github/workflows/build.yml | 6 ++---- Makefile | 12 +++++++++++- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index df63b26..8fe8d65 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -27,13 +27,11 @@ jobs: path: .venv key: venv-${{ matrix.python-version }}-${{ hashFiles('pyproject.toml') }}-${{ hashFiles('poetry.lock') }} - name: Install dependencies - run: | - poetry config virtualenvs.in-project true - poetry install + run: make install-dev - name: Run style checks run: | - make check-codestyle + make lint - name: Run tests run: | diff --git a/Makefile b/Makefile index 90b3c16..950376d 100644 --- a/Makefile +++ b/Makefile @@ -16,14 +16,21 @@ poetry-remove: poetry-plugins: poetry self add poetry-plugin-up +.PHONY: poetry-env +poetry-env: + poetry config virtualenvs.in-project true #* Installation .PHONY: install -install: +install: poetry-env poetry lock -n && poetry export --without-hashes > requirements.txt poetry install -n -poetry run mypy --install-types --non-interactive ./ +.PHONY: install-dev +install-dev: poetry-env install + poetry install -n --with dev + .PHONY: pre-commit-install pre-commit-install: poetry run pre-commit install @@ -58,6 +65,9 @@ lint: poetry run pydocstyle --count --config=pyproject.toml ./ poetry run pydoclint --config=pyproject.toml ./ +.PHONY: check-lint +check-lint: lint + .PHONY: lint-all lint: test lint mypy check-safety From 
a6bf8ee62756855a7e6b6587a6a4bd732b8359ea Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Wed, 17 Apr 2024 16:01:47 -0400 Subject: [PATCH 096/112] update and fix markdown linting --- .dockerignore | 1 + .github/remark.yaml | 9 +- .gitignore | 2 + CHANGELOG.md | 52 +++++-- CONTRIBUTING.md | 91 ++++++++++++ CONTRIBUTING_STAC_MODEL.md | 96 ------------ Makefile | 26 +++- README.md | 80 +++++----- README_DLM_LEGACY.md | 9 ++ README_STAC_MODEL.md | 26 ++-- best-practices.md | 21 ++- package.json | 2 + poetry.lock | 290 ++++++++++++++++++------------------- 13 files changed, 392 insertions(+), 313 deletions(-) create mode 100644 CONTRIBUTING.md delete mode 100644 CONTRIBUTING_STAC_MODEL.md create mode 100644 README_DLM_LEGACY.md diff --git a/.dockerignore b/.dockerignore index 6be1e24..b6d76d3 100644 --- a/.dockerignore +++ b/.dockerignore @@ -24,6 +24,7 @@ __pycache__/ # poetry .venv +requirements*.txt # C extensions *.so diff --git a/.github/remark.yaml b/.github/remark.yaml index 9d1b95c..a58537b 100644 --- a/.github/remark.yaml +++ b/.github/remark.yaml @@ -4,7 +4,8 @@ plugins: # Apply some recommended defaults for consistency - remark-preset-lint-consistent - remark-preset-lint-recommended - - lint-no-html + - - lint-no-html + - false # General formatting - - remark-lint-emphasis-marker - '*' @@ -12,7 +13,9 @@ plugins: - remark-lint-blockquote-indentation - remark-lint-no-consecutive-blank-lines - - remark-lint-maximum-line-length - - 150 + - 120 +# GFM - autolink literals, footnotes, strikethrough, tables, tasklist + - remark-gfm # Code - remark-lint-fenced-code-flag - remark-lint-fenced-code-marker @@ -37,7 +40,7 @@ plugins: - - remark-lint-unordered-list-marker-style - '-' - - remark-lint-list-item-indent - - space + - space # Tables - remark-lint-table-pipes - remark-lint-no-literal-urls diff --git a/.gitignore b/.gitignore index 3204cbe..a9b169b 100644 --- a/.gitignore +++ b/.gitignore @@ -202,6 +202,8 @@ ipython_config.py # commonly ignored for 
libraries. # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control #poetry.lock +# requirements extracted from poetry lock +requirements-lock.txt # pdm # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. diff --git a/CHANGELOG.md b/CHANGELOG.md index 2f0a1d4..0b30f5a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,28 +1,52 @@ # Changelog + All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). -## [Unreleased] +## [Unreleased](https://github.com/crim-ca/dlm-extension/tree/main) ### Added -- more [Task Enum](./README.md#task-enum) tasks -- [accelerator](./README#accelerators) options in [Runtime Object](./README#runtime-object) -- [Model Output Object](./README.md#model-output-object) +- n/a + +### Changed +- n/a + +### Deprecated +- n/a + +### Removed +- n/a + +### Fixed +- n/a + +## [0.1.1.alpha4](https://github.com/crim-ca/dlm-extension/tree/0.1.1.alpha4) + +### Added +- more [Task Enum](README.md#task-enum) tasks +- [Model Output Object](README.md#model-output-object) - batch_size and hardware summary -- [`disk_size`, `memory_size`](./README#architecture-object) -- [`hardware_summary`, `accelerator`, `accelerator_constrained`](./README#runtime-object) to specify hardware requirements for inference -- Use common metadata [Asset Object](https://github.com/radiantearth/stac-spec/blob/master/collection-spec/collection-spec.md#asset-object) to refer to model asset and source code. 
+- [`mlm:accelerator`, `mlm:accelerator_constrained`, `mlm:accelerator_summary`](./README.md#accelerator-type-enum) + to specify hardware requirements for the model +- Use common metadata + [Asset Object](https://github.com/radiantearth/stac-spec/blob/master/collection-spec/collection-spec.md#asset-object) + to refer to model asset and source code. - use `classification:classes` in Model Output - add `scene-classification` to the Enum Tasks to allow disambiguation between pixel-wise and patch-based classification ### Changed +- `disk_size` replaced by `file:size` (see [Best Practices - File Extension](best-practices.md#file-extension)) +- `memory_size` under `dlm:architecture` moved directly under Item properties as `mlm:memory_size` +- replaced all hardware/accelerator/runtime definitions into distinct `mlm` fields directly under the + STAC Item properties (top-level, not nested) to allow better search support by STAC API. - reorganized `dlm:architecture` nested fields to exist at the top level of properties as `mlm:name`, `mlm:summary` and so on to provide STAC API search capabilities. - replaced `normalization:mean`, etc. 
with [statistics](./README.md#bands-and-statistics) from STAC 1.1 common metadata - added `pydantic` models for internal schema objects in `stac_model` package and published to PYPI -- specified [rel_type](./README.md#relation-types) to be `derived_from` and specify how model item or collection json should be named +- specified [rel_type](README.md#relation-types) to be `derived_from` and + specify how model item or collection json should be named - replaced all Enum Tasks names to use hyphens instead of spaces - replaced `dlm:task` by `mlm:tasks` using an array of value instead of a single one, allowing models to represent multiple tasks they support simultaneously or interchangeably depending on context @@ -36,10 +60,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Removed - Data Object, replaced with [Model Input Object](./README.md#model-input-object) that uses the `name` field from - the [common metadata band object](https://github.com/radiantearth/stac-spec/blob/f9b3c59ba810541c9da70c5f8d39635f8cba7bcd/item-spec/common-metadata.md#bands) - which also records `data_type` and `nodata` type + the [common metadata band object][stac-bands] which also records `data_type` and `nodata` type + +### Fixed +- n/a + +[stac-bands]: https://github.com/radiantearth/stac-spec/blob/f9b3c59ba810541c9da70c5f8d39635f8cba7bcd/item-spec/common-metadata.md#bands -# TODO link release here +## [v1.0.0-beta3](https://github.com/crim-ca/dlm-extension/tree/v1.0.0-beta3) ### Added - Added example model architecture summary text. @@ -74,7 +102,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Fixed examples to refer to local files. - Fixed formatting of tables and descriptions in README. -[v1.0.0-beta2]: +## [v1.0.0-beta2](https://github.com/crim-ca/dlm-extension/tree/v1.0.0-beta2) ### Added - Initial release of the extension description and schema. 
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..0334f37 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,91 @@ +# How to contribute to stac-model + +## Project setup + +1. If you don't have `Poetry` installed run: + +```bash +make poetry-install +``` + +> This installs Poetry as a [standalone application][poetry-install]. +> If you prefer, you can simply install it inside your virtual environment. + +2. Initialize project dependencies with poetry and install `pre-commit` hooks: + +```bash +make install-dev +make pre-commit-install +``` + +You're then ready to run and test your contributions. + +To activate your `virtualenv` run `poetry shell`. + +Want to know more about Poetry? Check [its documentation][poetry-docs]. + +Poetry's [commands][poetry-cli] let you easily make descriptive python environments +and run commands in those environments, like: + +- `poetry add numpy@latest` +- `poetry run pytest` +- `poetry publish --build` + +etc. + +3. Run linting checks: + +```bash +make lint-all +``` + +4. Run `pytest` with + +```bash +make test +``` + +5. Upload your changes to your fork, then make a PR from there to the main repo: + +```bash +git checkout -b your-branch +git add . +git commit -m ":tada: Initial commit" +git remote add origin https://github.com/your-fork/stac-model.git +git push -u origin your-branch +``` + +## Building and releasing stac-model + +Building a new version of `stac-model` contains steps: + +- Bump the version with `poetry version `. + You can pass the new version explicitly, or a rule such as `major`, `minor`, or `patch`. + For more details, refer to the [Semantic Versions][semver] standard; +- Make a commit to `GitHub`; +- Create a `GitHub release`; +- And... publish :slight_smile: `poetry publish --build` + +### Before submitting + +Before submitting your code please do the following steps: + +1. Add any changes you want +2. Add tests for the new changes +3. 
Edit documentation if you have changed something significant +4. Run `make codestyle` to format your changes. +5. Run `make lint-all` to ensure that types, security and docstrings are okay. + +## Other help + +You can contribute by spreading a word about this library. +It would also be a huge contribution to write +a short article on how you are using this project. +You can also share how the ML Model extension does or does +not serve your needs with us in the Github Discussions or raise +Issues for bugs. + +[poetry-install]: https://github.com/python-poetry/install.python-poetry.org +[poetry-docs]: https://python-poetry.org/docs/ +[poetry-cli]: https://python-poetry.org/docs/cli/#commands +[semver]: https://semver.org/ diff --git a/CONTRIBUTING_STAC_MODEL.md b/CONTRIBUTING_STAC_MODEL.md deleted file mode 100644 index f9522f7..0000000 --- a/CONTRIBUTING_STAC_MODEL.md +++ /dev/null @@ -1,96 +0,0 @@ -# How to contribute to stac-model - -### Project setup - -1. If you don't have `Poetry` installed run: - -```bash -make poetry-install -``` - -> This installs Poetry as a [standalone application][fs1]. If you prefer, you can simply install it inside your virtual environment. - -2. Initialize project dependencies with poetry and install `pre-commit` hooks: - -```bash -make install -make pre-commit-install -``` - -You're then ready to run and test your contributions. - -To activate your `virtualenv` run `poetry shell`. - -Want to know more about Poetry? Check [its documentation][fs2]. - -Poetry's [commands][fs3] let you easily make descriptive python environments and run commands in those environments, like: - -- `poetry add numpy@latest` -- `poetry run pytest` -- `poetry publish --build` - -etc. - -3. Run the codestyle and other checks: - -```bash -make codestyle -``` - -Many checks are configured for this project. Command `make check-codestyle` will run ruff for linting and autoformatting. `make lint` will just run linting. 
`make check-safety` will look at the security of your code. - -Command `make lint-all` applies all checks. - - -4. Run `pytest` with - -```bash -make test -``` - - -5. Upload your changes to your fork, then make a PR from there to the main repo: - -```bash -git checkout -b your-branch -git add . -git commit -m ":tada: Initial commit" -git remote add origin https://github.com/your-fork/stac-model.git -git push -u origin your-branch -``` - -### Building and releasing stac-model - -Building a new version of `stac-model` contains steps: - -- Bump the version with `poetry version `. You can pass the new version explicitly, or a rule such as `major`, `minor`, or `patch`. For more details, refer to the [Semantic Versions][fs4] standard; -- Make a commit to `GitHub`; -- Create a `GitHub release`; -- And... publish :slight_smile: `poetry publish --build` - -### Before submitting - -Before submitting your code please do the following steps: - -1. Add any changes you want -1. Add tests for the new changes -1. Edit documentation if you have changed something significant -1. Run `make codestyle` to format your changes. -1. Run `make lint-all` to ensure that types, security and docstrings are okay. - -## Other help - -You can contribute by spreading a word about this library. -It would also be a huge contribution to write -a short article on how you are using this project. -You can also share how the ML Model extension does or does -not serve your needs with us in the Github Discussions or raise -Issues for bugs. 
- -[fs1]: https://github.com/python-poetry/install.python-poetry.org -[fs2]: https://python-poetry.org/docs/ -[fs3]: https://python-poetry.org/docs/cli/#commands -[fs4]: https://semver.org/ - -[li2]: http://www.pydocstyle.org/en/stable/ -[li3]: https://github.com/jsh9/pydoclint diff --git a/Makefile b/Makefile index 950376d..e7bc0dd 100644 --- a/Makefile +++ b/Makefile @@ -23,7 +23,7 @@ poetry-env: #* Installation .PHONY: install install: poetry-env - poetry lock -n && poetry export --without-hashes > requirements.txt + poetry lock -n && poetry export --without-hashes > requirements-lock.txt poetry install -n -poetry run mypy --install-types --non-interactive ./ @@ -68,8 +68,30 @@ lint: .PHONY: check-lint check-lint: lint +.PHONY: install-npm +install-npm: + npm install + +.PHONY: check-markdown +check-markdown: install-npm + npm run check-markdown + +.PHONY: format-markdown +format-markdown: install-npm + npm run format-markdown + +.PHONY: check-examples +check-examples: install-npm + npm run check-examples + +.PHONY: format-examples +format-examples: install-npm + npm run format-examples + +fix-%: format-%s + .PHONY: lint-all -lint: test lint mypy check-safety +lint: test lint mypy check-safety check-markdown .PHONY: update-dev-deps update-dev-deps: diff --git a/README.md b/README.md index 876c6c7..9a0f58f 100644 --- a/README.md +++ b/README.md @@ -38,14 +38,19 @@ The Machine Learning Model Extension purposely omits and delegates some definiti reusability and avoid metadata duplication whenever possible. A properly defined MLM STAC Item/Collection should almost never have the Machine Learning Model Extension exclusively in `stac_extensions`. -Check the original [Technical Report](https://github.com/crim-ca/CCCOT03/raw/main/CCCOT03_Rapport%20Final_FINAL_EN.pdf) -for an earlier version of the MLM Extension, formerly known as the Deep Learning Model Extension (DLM). 
+For details about the earlier (legacy) version of the MLM Extension, formerly known as +the *Deep Learning Model Extension* (DLM), please refer to the [DLM LEGACY](README_DLM_LEGACY.md) document. DLM was renamed to the current MLM Extension and refactored to form a cohesive definition across all machine learning approaches, regardless of whether the approach constitutes a deep neural network or other statistical approach. It also combines multiple definitions from the predecessor [ML-Model](https://github.com/stac-extensions/ml-model) extension to synthesize common use cases into a single reference for Machine Learning Models. -![Image Description](https://i.imgur.com/cVAg5sA.png) +For more details about the [`stac-model`](stac_model) Python package, which provides definitions of the MLM extension +using both [`Pydantic`](https://docs.pydantic.dev/latest/) and [`PySTAC`](https://pystac.readthedocs.io/en/stable/) +connectors, please refer to the [STAC Model](README_STAC_MODEL.md) document. + +> :warning:
+> FIXME: update examples - Examples: - [Example with a ??? trained with torchgeo](examples/item.json) TODO update example @@ -63,24 +68,24 @@ The fields in the table below can be used in these parts of STAC documents: - [x] Assets (for both Collections and Items, incl. Item Asset Definitions in Collections, except `mlm:name`) - [ ] Links -| Field Name | Type | Description | -|-----------------------------|--------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| mlm:name | string | **REQUIRED** A unique name for the model. This can include, but must be distinct, from simply naming the model architecture. If there is a publication or other published work related to the model, use the official name of the model. | -| mlm:architecture | [Model Architecture](#model-architecture) string | **REQUIRED** A generic and well established architecture name of the model. | -| mlm:tasks | \[[Task Enum](#task-enum)] | **REQUIRED** Specifies the Machine Learning tasks for which the model can be used for. If multi-tasks outputs are provided by distinct model heads, specify all available tasks under the main properties and specify respective tasks in each [Model Output Object](#model-output-object). | -| mlm:framework | string | Framework used to train the model (ex: PyTorch, TensorFlow). | -| mlm:framework_version | string | The `framework` library version. Some models require a specific version of the machine learning `framework` to run. | -| mlm:memory_size | integer | The in-memory size of the model on the accelerator during inference (bytes). | -| mlm:total_parameters | integer | Total number of model parameters, including trainable and non-trainable parameters. 
| -| mlm:pretrained | boolean | Indicates if the model was pretrained. If the model was pretrained, consider providing `pretrained_source` if it is known. | -| mlm:pretrained_source | string \| null | The source of the pretraining. Can refer to popular pretraining datasets by name (i.e. Imagenet) or less known datasets by URL and description. If trained from scratch (i.e.: `pretrained = false`), the `null` value should be set explicitly. | -| mlm:batch_size_suggestion | integer | A suggested batch size for the accelerator and summarized hardware. | -| mlm:accelerator | [Accelerator Enum](#accelerator-enum) \| null | The intended computational hardware that runs inference. If undefined or set to `null` explicitly, the model does not require any specific accelerator. | -| mlm:accelerator_constrained | boolean | Indicates if the intended `accelerator` is the only `accelerator` that can run inference. If undefined, it should be assumed `false`. | -| mlm:accelerator_summary | string | A high level description of the `accelerator`, such as its specific generation, or other relevant inference details. | -| mlm:accelerator_count | integer | A minimum amount of `accelerator` instances required to run the model. | -| mlm:input | \[[Model Input Object](#model-input-object)] | **REQUIRED** Describes the transformation between the EO data and the model input. | -| mlm:output | \[[Model Output Object](#model-output-object)] | **REQUIRED** Describes each model output and how to interpret it. | +| Field Name | Type | Description | +|-----------------------------|---------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| mlm:name | string | **REQUIRED** A unique name for the model. 
This can include, but must be distinct, from simply naming the model architecture. If there is a publication or other published work related to the model, use the official name of the model. | +| mlm:architecture | [Model Architecture](#model-architecture) string | **REQUIRED** A generic and well established architecture name of the model. | +| mlm:tasks | \[[Task Enum](#task-enum)] | **REQUIRED** Specifies the Machine Learning tasks for which the model can be used for. If multi-tasks outputs are provided by distinct model heads, specify all available tasks under the main properties and specify respective tasks in each [Model Output Object](#model-output-object). | +| mlm:framework | string | Framework used to train the model (ex: PyTorch, TensorFlow). | +| mlm:framework_version | string | The `framework` library version. Some models require a specific version of the machine learning `framework` to run. | +| mlm:memory_size | integer | The in-memory size of the model on the accelerator during inference (bytes). | +| mlm:total_parameters | integer | Total number of model parameters, including trainable and non-trainable parameters. | +| mlm:pretrained | boolean | Indicates if the model was pretrained. If the model was pretrained, consider providing `pretrained_source` if it is known. | +| mlm:pretrained_source | string \| null | The source of the pretraining. Can refer to popular pretraining datasets by name (i.e. Imagenet) or less known datasets by URL and description. If trained from scratch (i.e.: `pretrained = false`), the `null` value should be set explicitly. | +| mlm:batch_size_suggestion | integer | A suggested batch size for the accelerator and summarized hardware. | +| mlm:accelerator | [Accelerator Type Enum](#accelerator-type-enum) \| null | The intended computational hardware that runs inference. If undefined or set to `null` explicitly, the model does not require any specific accelerator. 
| +| mlm:accelerator_constrained | boolean | Indicates if the intended `accelerator` is the only `accelerator` that can run inference. If undefined, it should be assumed `false`. | +| mlm:accelerator_summary | string | A high level description of the `accelerator`, such as its specific generation, or other relevant inference details. | +| mlm:accelerator_count | integer | A minimum amount of `accelerator` instances required to run the model. | +| mlm:input | \[[Model Input Object](#model-input-object)] | **REQUIRED** Describes the transformation between the EO data and the model input. | +| mlm:output | \[[Model Output Object](#model-output-object)] | **REQUIRED** Describes each model output and how to interpret it. | | mlm:hyperparameters | [Model Hyperparameters Object](#model-hyperparameters-object) | Additional hyperparameters relevant for the model. | To decide whether above fields should be applied under Item `properties` or under respective Assets, the context of @@ -96,12 +101,12 @@ In addition, fields from the multiple relevant extensions should be defined as a [Best Practices - Recommended Extensions to Compose with the ML Model Extension](best-practices.md#recommended-extensions-to-compose-with-the-ml-model-extension) for more details. -For the [Extent Object](https://github.com/radiantearth/stac-spec/blob/master/collection-spec/collection-spec.md#extent-object) +For the [Extent Object][stac-extent] in STAC Collections and the corresponding spatial and temporal fields in Items, please refer to section -[Best Practices - Using STAC Common Metadata Fields for the ML Model Extension](best-practices.md#using-stac-common-metadata-fields-for-the-ml-model-extension). +[Best Practices - Using STAC Common Metadata Fields for the ML Model Extension][stac-mlm-meta]. 
-[stac-ext-sci]: https://github.com/radiantearth/stac-spec/tree/v1.0.0-beta.2/extensions/scientific/README.md -[stac-ext-ver]: https://github.com/radiantearth/stac-spec/tree/v1.0.0-beta.2/extensions/version/README.md +[stac-mlm-meta]: best-practices.md#using-stac-common-metadata-fields-for-the-ml-model-extension +[stac-extent]: https://github.com/radiantearth/stac-spec/blob/master/collection-spec/collection-spec.md#extent-object ### Model Architecture @@ -188,7 +193,7 @@ It is recommended to define `accelerator` with one of the following values: - `intel-ipex-gpu` for models optimized with IPEX for Intel GPUs - `macos-arm` for models trained on Apple Silicon -> [!WARNING] +> :warning:
> If `mlm:accelerator = amd64`, this explicitly indicates that the model does not (and will not try to) use any > accelerator, even if some are available from the runtime environment. This is to be distinguished from > the value `mlm:accelerator = null`, which means that the model *could* make use of some accelerators if provided, @@ -228,7 +233,7 @@ representing bands information, including notably the `nodata` value, the `data_type` (see also [Data Type Enum](#data-type-enum)), and [Common Band Names][stac-band-names]. -> [!NOTE] +> :information_source:
> Due to how the schema for [`eo:bands`][stac-eo-band] is defined, it is not sufficient to *only* provide > the `eo:bands` property at the STAC Item level. The schema validation of the EO extension explicitly looks > for a corresponding set of bands under an Asset, and if none is found, it disallows `eo:bands` in the Item properties. @@ -239,7 +244,7 @@ and [Common Band Names][stac-band-names]. >

> For more details, refer to [stac-extensions/eo#12](https://github.com/stac-extensions/eo/issues/12). >
-> For an example, please refer to [examples/example_eo_bands.json](examples/example_eo_bands.json). +> For an example, please refer to [examples/item_eo_bands.json](examples/item_eo_bands.json). > Notably in this example, the `assets.weights.eo:bands` property provides the `name` to fulfill the Asset requirement, > while all additional band details are provided in `properties.eo:bands`. @@ -378,15 +383,14 @@ the following formats are recommended as alternative scripts and function refere | `docker` | string | An URI with image and tag to a Docker. | `ghcr.io/NAMESPACE/IMAGE_NAME:latest` | | `uri` | string | An URI to some binary or script. | `{"href": "https://raw.githubusercontent.com/ORG/REPO/TAG/package/cli.py", "type": "text/x-python"}` | -> [!NOTE] +> :information_source:
> Above definitions are only indicative, and more can be added as desired with even more custom definitions. > It is left as an implementation detail for users to resolve how these expressions should be handled at runtime. -> [!WARNING] +> :warning:
> See also discussion regarding additional processing expressions: > [stac-extensions/processing#31](https://github.com/stac-extensions/processing/issues/31) - [stac-proc-expr]: https://github.com/stac-extensions/processing#expression-object ### Model Output Object @@ -462,8 +466,8 @@ appropriate [MLM Asset Roles](#mlm-asset-roles) to ensure their discovery. Asset `roles` should include relevant names that describe them. This does not only include the [Recommended Asset Roles](https://github.com/radiantearth/stac-spec/blob/master/item-spec/item-spec.md#asset-roles) from the core specification, such as `data` or `metadata`, but also descriptors such as `mlm:model`, `mlm:weights` and -so on, as applicable for the relevant [MLM Asset](#mlm-assets) being described. Please refer to the following sections -for `roles` requirements by specific [MLM Asset](#mlm-assets). +so on, as applicable for the relevant MLM Assets being described. Please refer to the following sections +for `roles` requirements by specific MLM Assets. Note that `mlm:` prefixed roles are used for identification purpose of the Assets, but non-prefixed roles can be provided as well to offer generic descriptors. For example, `["mlm:model", "model", "data"]` could be considered for @@ -480,7 +484,7 @@ In order to provide more context, the following roles are also recommended were | mlm:model | `model` | Required role for [Model Asset](#model-asset). | | mlm:source_code | `code` | Required role for [Model Asset](#source-code-asset). | -> [!NOTE] +> :information_source:
> (*) These roles are offered as direct conversions from the previous extension > that provided [ML-Model Asset Roles][ml-model-asset-roles] to provide easier upgrade to the MLM extension. @@ -545,7 +549,7 @@ the users understand the source explicitly, although this is not strictly requir | Artifact Type | Description | |--------------------|--------------------------------------------------------------------------------------| -| `torch.save` | A model artifact obtained by [Serialized Pickle Object][pytorch.save] (i.e.: `.pt`). | +| `torch.save` | A model artifact obtained by [Serialized Pickle Object][pytorch-save] (i.e.: `.pt`). | | `torch.jit.script` | A model artifact obtained by [`TorchScript`][pytorch-jit-script]. | | `torch.export` | A model artifact obtained by [`torch.export`][pytorch-export] (i.e.: `.pt2`). | | `torch.compile` | A model artifact obtained by [`torch.compile`][pytorch-compile]. | @@ -620,7 +624,7 @@ You can also use other base images. Pytorch and Tensorflow offer docker images f - [Torchserve](https://pytorch.org/serve/) - [TFServing](https://github.com/tensorflow/serving) -## Relation types +## Relation Types The following types should be used as applicable `rel` types in the [Link Object](https://github.com/radiantearth/stac-spec/tree/master/item-spec/item-spec.md#link-object) @@ -650,7 +654,9 @@ for running tests are copied here for convenience. ### Running tests -The same checks that run as checks on PRs are part of the repository and can be run locally to verify that changes are valid. To run tests locally, you'll need `npm`, which is a standard part of any [node.js](https://nodejs.org/en/download/) installation. +The same checks that run as checks on PRs are part of the repository and can be run locally to verify that changes +are valid. To run tests locally, you'll need `npm`, which is a standard part of +any [node.js](https://nodejs.org/en/download/) installation. First, install everything with npm once. 
Navigate to the root of this repository and on your command line run: diff --git a/README_DLM_LEGACY.md b/README_DLM_LEGACY.md new file mode 100644 index 0000000..fbc815a --- /dev/null +++ b/README_DLM_LEGACY.md @@ -0,0 +1,9 @@ +# Deep Learning Model (DLM) Extension + +> :information_source:
+> These are legacy documentation references of the Deep Learning Model extension +> preceding the current Machine Learning Model (MLM) extension. + +Check the original [Technical Report](https://github.com/crim-ca/CCCOT03/raw/main/CCCOT03_Rapport%20Final_FINAL_EN.pdf). + +![Image Description](https://i.imgur.com/cVAg5sA.png) diff --git a/README_STAC_MODEL.md b/README_STAC_MODEL.md index 58556c1..24aa435 100644 --- a/README_STAC_MODEL.md +++ b/README_STAC_MODEL.md @@ -1,5 +1,7 @@ # stac-model + +
[![Python support][bp1]][bp2] @@ -15,43 +17,47 @@ [![Semantic versions][blic3]][bp5] [![Pipelines][bscm6]][bscm7] -_A PydanticV2 validation and serialization library for the STAC ML Model Extension_ +_A PydanticV2 and PySTAC validation and serialization library for the STAC ML Model Extension_
+> :warning:
+> FIXME: update description with ML framework connectors (pytorch, scikit-learn, etc.) + ## Installation -```bash +```shell pip install -U stac-model ``` or install with `Poetry`: -```bash +```shell poetry add stac-model ``` Then you can run -```bash +```shell stac-model --help ``` -## Creating an example metadata json +## Creating example metadata JSON for a STAC Item -``` +```shell stac-model ``` -This will make [this example item](./examples/example.json) for an example model. +This will make [this example item](./examples/item.json) for an example model. ## :chart_with_upwards_trend: Releases -You can see the list of available releases on the [GitHub Releases][r1] page. +You can see the list of available releases on the [GitHub Releases][github-releases] page. ## :page_facing_up: License [![License][blic1]][blic2] -This project is licenced under the terms of the `Apache Software License 2.0` licence. See [LICENSE][blic2] for more details. +This project is licenced under the terms of the `Apache Software License 2.0` licence. +See [LICENSE][blic2] for more details. 
## :heartpulse: Credits [![Python project templated from galactipy.][bp6]][bp7] @@ -77,7 +83,7 @@ This project is licenced under the terms of the `Apache Software License 2.0` li [blic2]: https://github.com/stac-extensions/stac-model/blob/main/LICENCE [blic3]: https://img.shields.io/badge/%F0%9F%93%A6-semantic%20versions-4053D6?style=for-the-badge -[r1]: https://github.com/stac-extensions/stac-model/releases +[github-releases]: https://github.com/stac-extensions/stac-model/releases [bscm1]: https://img.shields.io/badge/GitHub-100000?style=for-the-badge&logo=github&logoColor=white [bscm2]: https://img.shields.io/github/v/release/stac-extensions/stac-model?style=for-the-badge&logo=semantic-release&color=347d39 diff --git a/best-practices.md b/best-practices.md index 7e14d3e..f574b1e 100644 --- a/best-practices.md +++ b/best-practices.md @@ -41,7 +41,7 @@ could include the bbox of "the world" `[-90, -180, 90, 180]` and the `start_date would ideally be generic values like `["1900-01-01T00:00:00Z", null]` (see warning below). However, due to limitations with the STAC 1.0 specification, this time extent is not applicable. -> [!WARNING] +> :warning:
> The `null` value is not allowed for datetime specification. > As a workaround, the `end_datetime` can be set with a "very large value" > (similarly to `start_datetime` set with a small value), such as `"9999-12-31T23:59:59Z"`. @@ -87,7 +87,8 @@ annotated with [`processing:level = L4`](https://github.com/stac-extensions/proc (as described below) to indicate that they correspond from the output of an ML model. > processing:level = L4
-> Model output or results from analyses of lower level data (i.e.: variables that are not directly measured by the instruments, but are derived from these measurements) +> Model output or results from analyses of lower level data +> (i.e.: variables that are not directly measured by the instruments, but are derived from these measurements). Furthermore, the [`processing:expression`](https://github.com/stac-extensions/processing?tab=readme-ov-file#expression-object) should be specified with a reference to the STAC Item employing the MLM extension to provide full context of the source @@ -186,13 +187,13 @@ MLM definition to indicate which class values can be contained in the resulting For more details, see the [Model Output Object](README.md#model-output-object) definition. -> [!NOTE] -> Update according to https://github.com/stac-extensions/classification/issues/48 +> :information_source:
+> Update according to [stac-extensions/classification#48](https://github.com/stac-extensions/classification/issues/48). ### Scientific Extension Provided that most models derive from previous scientific work, it is strongly recommended to employ the -[Scientific Extension](https://github.com/stac-extensions/scientific) to provide references corresponding to the +[Scientific Extension][stac-ext-sci] to provide references corresponding to the original source of the model (`sci:doi`, `sci:citation`). This can help users find more information about the model, its underlying architecture, or ways to improve it by piecing together the related work (`sci:publications`) that lead to its creation. @@ -200,6 +201,8 @@ lead to its creation. This extension can also be used for the purpose of publishing new models, by providing to users the necessary details regarding how they should cite its use (i.e.: `sci:citation` field and `cite-as` relation type). +[stac-ext-sci]: https://github.com/stac-extensions/scientific + ### File Extension In order to provide a reliable and reproducible machine learning pipeline, external references to data required by the @@ -249,14 +252,14 @@ inference strategies to apply a model should define the [Source Code Asset](READ This code is in itself ideal to guide users how to run it, and should therefore be replicated as an `example` link reference to offer more code samples to execute the model. -> [!NOTE] -> Update according to https://github.com/stac-extensions/example-links/issues/4 +> :information_source:
+> Update according to [stac-extensions/example-links#4](https://github.com/stac-extensions/example-links/issues/4). ### Version Extension In the even that a model is retrained with gradually added annotations or improved training strategies leading to better performances, the existing model and newer models represented by STAC Items with MLM should also make use of -the [Version Extension](https://github.com/stac-extensions/version). Using the fields and link relation types defined +the [Version Extension][stac-ext-version]. Using the fields and link relation types defined by this extension, the retraining cycle of the model can better be described, with a full history of the newer versions developed. @@ -264,3 +267,5 @@ Additionally, the `version:experimental` field should be considered for models b before widespread deployment. This can be particularly useful for annotating models experiments during cross-validation training process to find the "best model". This field could also be used to indicate if a model is provided for educational purposes only. + +[stac-ext-version]: https://github.com/stac-extensions/version diff --git a/package.json b/package.json index 46d280a..4c6ab38 100644 --- a/package.json +++ b/package.json @@ -4,6 +4,7 @@ "scripts": { "test": "npm run check-markdown && npm run check-examples", "check-markdown": "remark . -f -r .github/remark.yaml", + "format-markdown": "remark . -f -r .github/remark.yaml -o", "check-examples": "stac-node-validator . --lint --verbose --schemaMap https://stac-extensions.github.io/template/v1.0.0/schema.json=./json-schema/schema.json", "format-examples": "stac-node-validator . 
--format --schemaMap https://stac-extensions.github.io/template/v1.0.0/schema.json=./json-schema/schema.json" }, @@ -11,6 +12,7 @@ "remark-cli": "^8.0.0", "remark-lint": "^7.0.0", "remark-lint-no-html": "^2.0.0", + "remark-gfm": "^4.0.0", "remark-preset-lint-consistent": "^3.0.0", "remark-preset-lint-markdown-style-guide": "^3.0.0", "remark-preset-lint-recommended": "^4.0.0", diff --git a/poetry.lock b/poetry.lock index f01c4cb..98596dd 100644 --- a/poetry.lock +++ b/poetry.lock @@ -305,13 +305,13 @@ test = ["pytest (>=6)"] [[package]] name = "filelock" -version = "3.13.3" +version = "3.13.4" description = "A platform independent file lock." optional = false python-versions = ">=3.8" files = [ - {file = "filelock-3.13.3-py3-none-any.whl", hash = "sha256:5ffa845303983e7a0b7ae17636509bc97997d58afeafa72fb141a17b152284cb"}, - {file = "filelock-3.13.3.tar.gz", hash = "sha256:a79895a25bbefdf55d1a2a0a80968f7dbb28edcd6d4234a0afb3f37ecde4b546"}, + {file = "filelock-3.13.4-py3-none-any.whl", hash = "sha256:404e5e9253aa60ad457cae1be07c0f0ca90a63931200a47d9b6a6af84fd7b45f"}, + {file = "filelock-3.13.4.tar.gz", hash = "sha256:d13f466618bfde72bd2c18255e269f72542c6e70e7bac83a0232d6b1cc5c8cf4"}, ] [package.extras] @@ -335,13 +335,13 @@ license = ["ukkonen"] [[package]] name = "idna" -version = "3.6" +version = "3.7" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.5" files = [ - {file = "idna-3.6-py3-none-any.whl", hash = "sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f"}, - {file = "idna-3.6.tar.gz", hash = "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca"}, + {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, + {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, ] [[package]] @@ -599,18 +599,18 @@ files = [ [[package]] name = "pydantic" -version = 
"2.7.0b1" +version = "2.7.0" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.7.0b1-py3-none-any.whl", hash = "sha256:c9cfcbfac6177f9e988fcffa727c42164ad03c3c8cd128057553c2d724fb6556"}, - {file = "pydantic-2.7.0b1.tar.gz", hash = "sha256:b0b45e2f249f7a304a8a3b724e03b206bd23ad584669fe31dbb3e38199fc9ff7"}, + {file = "pydantic-2.7.0-py3-none-any.whl", hash = "sha256:9dee74a271705f14f9a1567671d144a851c675b072736f0a7b2608fd9e495352"}, + {file = "pydantic-2.7.0.tar.gz", hash = "sha256:b5ecdd42262ca2462e2624793551e80911a1e989f462910bb81aef974b4bb383"}, ] [package.dependencies] annotated-types = ">=0.4.0" -pydantic-core = "2.18.0" +pydantic-core = "2.18.1" typing-extensions = ">=4.6.1" [package.extras] @@ -618,90 +618,90 @@ email = ["email-validator (>=2.0.0)"] [[package]] name = "pydantic-core" -version = "2.18.0" +version = "2.18.1" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.18.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:c36ee17f0b85e98d5488a60bd4a022cb1e82f1995cc891bb371c1a15a52e5833"}, - {file = "pydantic_core-2.18.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3992d08de5ece507d334f166bd489eef46226ae26ecf890338a6bca710042d5e"}, - {file = "pydantic_core-2.18.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3e9cc290c91f300a435f7c8dca9ce8e492fb2f3c57dddef7aa8e56e5d33f962"}, - {file = "pydantic_core-2.18.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1d68106d504c34bc9971e6eca22ef603a95b4531449ee8460f136bc6a77dc7a3"}, - {file = "pydantic_core-2.18.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b5b8c26d5152be544ec9fcbac5087ffef1f3b831d0cba168016ac7e6063a29a"}, - {file = "pydantic_core-2.18.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:84eee9cd65aadba8aa45d3a5f7ce09a9263d2c1788dbb6d40f4f5345f76f97a6"}, - {file = "pydantic_core-2.18.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac856c69bd2aefcaa1c29ebb7d3c191e9de7aad063284c1e760c43983ad18c3a"}, - {file = "pydantic_core-2.18.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1b4a5de4867c582aa61ea7c83d977b9243c264c7e6c45d8b61dfb0f2bd243395"}, - {file = "pydantic_core-2.18.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6559ffbf66fae9d333aaf8c34b67e83912999781120c90e6aed59ae6077ed74f"}, - {file = "pydantic_core-2.18.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4e04cb00fdb79b6b8e1085d2152e1a2dfa21640f6a962476740e1542e07e8b0f"}, - {file = "pydantic_core-2.18.0-cp310-none-win32.whl", hash = "sha256:a83fd7a2983c9bb6bd1aec7257a8a96c29d48c30d9d8b8ae13a44155163dd42d"}, - {file = "pydantic_core-2.18.0-cp310-none-win_amd64.whl", hash = "sha256:5d8f4e95917439ba4398d9e2ce2be9f5840e91ea63ae018b3b148d48e99e99c1"}, - {file = "pydantic_core-2.18.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a99333701e0cf16ac8a646c92d5b9dc9f8cadd0a026f50bf0ddde34eede70bc3"}, - {file = "pydantic_core-2.18.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:647ce6122e6ae6b972076793851efd284c4b51b93ed4071d6735bcf44e663c03"}, - {file = "pydantic_core-2.18.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18b2dc6a2a027828377175613cfb3f69c40b347084886c2ca5bb1b713c3c0c1f"}, - {file = "pydantic_core-2.18.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8e179a23237f30452776ab3fd094fd0005f45615ab826c0bb077f5657f0e84db"}, - {file = "pydantic_core-2.18.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:569a28b5f7c5a1c9b9eea5b41f18f3e0235ec25212c4b7fa98add07e3b4fce2a"}, - {file = "pydantic_core-2.18.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:ca09d373225203c5849200019d7bb8fc50d4f466e9d10d67205c3e2da1221df6"}, - {file = "pydantic_core-2.18.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:adf5935716452b77a64e51f1344c34aab8c2e956ba32da9d038dc7f73e2262c7"}, - {file = "pydantic_core-2.18.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2f5570b47048ad0421411e9bdf1b96eee8816aeaeea7c8db78877ecd9339685f"}, - {file = "pydantic_core-2.18.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a598c0786327db680ac04f8e5125cd4a729528713c09bb8fd0c40e66750bc89f"}, - {file = "pydantic_core-2.18.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cb7ee00b493cd28998d6cd6089e994f0cc08fed5113f5dd09b8bb8c27b5dc55c"}, - {file = "pydantic_core-2.18.0-cp311-none-win32.whl", hash = "sha256:7676ec76faab6c4dbc7fdaf644f70af27ccf1868c7157da352fb55206a35e4d3"}, - {file = "pydantic_core-2.18.0-cp311-none-win_amd64.whl", hash = "sha256:c21fe62521eaf617fbb04b0fcf9af085e8dc7ea3a3ee22da3af671475f29aed1"}, - {file = "pydantic_core-2.18.0-cp311-none-win_arm64.whl", hash = "sha256:c5ee382586174d3639092b32a1a7ba4cfdadd67b2539814ddc42542d6e622dd0"}, - {file = "pydantic_core-2.18.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:c141b49350139f94a94d9268b82c0e7f91b05f1f479b785de1a5499460e68864"}, - {file = "pydantic_core-2.18.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:68e0ee55f7d521a8e7556217219112a1e9bc55b4484c8959c24e2e1a0da874d9"}, - {file = "pydantic_core-2.18.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88adc4fc547c8f8b0025974c27fd4671ec2f7ee375859a1c88313a8a63b4615e"}, - {file = "pydantic_core-2.18.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6399de345097e76a3d7420a25411939fb72fcc51890847c8b8599a43fd0b7439"}, - {file = "pydantic_core-2.18.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:83def986dea51011b9bad66b7481aabff5863cd05bd17cab4f228378d918292b"}, - {file = 
"pydantic_core-2.18.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2ebe41d751e3347b5d5880498a965bd6523285ce5e7907d70de33c221dc347a4"}, - {file = "pydantic_core-2.18.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3fc20e48c936e1453e2797b28044f4cd3004c98296294b4aac31170ff44b8496"}, - {file = "pydantic_core-2.18.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:68827e0dc97c047e527dd6b86f5b4b1605faefa7a18d8f227d8f6754a6747f63"}, - {file = "pydantic_core-2.18.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d0bc6036cea7f7ba419ce1b8f2e0f8e27eddcde626fcad507edb5b7519073006"}, - {file = "pydantic_core-2.18.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c96ec95751deb156d036b348e1eef758e82326989d7e2e9fc9479d1f30b90da3"}, - {file = "pydantic_core-2.18.0-cp312-none-win32.whl", hash = "sha256:f527522a0e5470e04c75cc2f3bb272f6940acc9e426a38a6ec60ae708c1f6d58"}, - {file = "pydantic_core-2.18.0-cp312-none-win_amd64.whl", hash = "sha256:6ef640a492dad6fbe289eb91a88d7f67d6ca984db556ee1a3891a5fff4a412d2"}, - {file = "pydantic_core-2.18.0-cp312-none-win_arm64.whl", hash = "sha256:362f29ffcf78b20d2507bd39c348233a33cb0c9d70bbb26e85fc521690683e2c"}, - {file = "pydantic_core-2.18.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:f1264b478a8e5283db4eea8344d53dc608dac862ea74b1f81d1edcd785451702"}, - {file = "pydantic_core-2.18.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4368aaa4d68acf489b67a7ecb0d6f8a0c478a4491e4eb8c2b9f352800322ed32"}, - {file = "pydantic_core-2.18.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aabce6144cc2cd43e2363b463f6ba2979c7b77bad7e3ac732fc69b19e097ffcd"}, - {file = "pydantic_core-2.18.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:765b970000068ce5b42c7ffab0bcc86fd8ce141a9e3910c6f9b1bcdea158b233"}, - {file = "pydantic_core-2.18.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:7d0c402862402430378e72927763c5f71554db494006d32f15d48d80dca25ef1"}, - {file = "pydantic_core-2.18.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd2b5c7eff2e1e4d97a5d7f2e399301e774d10f883fd355689f5e225c2283c42"}, - {file = "pydantic_core-2.18.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b39578677908dca286c7a6565512f0321dd4591a9bd013c34c3e3004316a814"}, - {file = "pydantic_core-2.18.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:15c91b087938e1917e6f66c82928808312df403f869affb48a6d1fb9aca948c2"}, - {file = "pydantic_core-2.18.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:702ddde99e36cc25d674ae3bdd21aeab0460e7bdf3f587057db2240485e48366"}, - {file = "pydantic_core-2.18.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:26b9f7654f7d076e35f51f369b885efe877313d9d9fab3d6d291ad3ea25e10dd"}, - {file = "pydantic_core-2.18.0-cp38-none-win32.whl", hash = "sha256:d714d80d505db509781e686b1ec6ae0f0f4d0ce5ee3a91a75a41d4da2592276f"}, - {file = "pydantic_core-2.18.0-cp38-none-win_amd64.whl", hash = "sha256:7f4e623d413d78dac0e66f6aff68d6ea43993acd954fbb1840fffebf0ef3e90a"}, - {file = "pydantic_core-2.18.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:347efc12f055c44383d8b41e7ee72a6189156d9bfaa2952c349856432b3cae91"}, - {file = "pydantic_core-2.18.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d432e0c0177ae5b64f3c302b7a9a62b36b9abe3210d078540fd633d90144375b"}, - {file = "pydantic_core-2.18.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:86e05c39ed6862d6864771f57d29e31ace0e91c3b8971bf5d53b2ed9156a025e"}, - {file = "pydantic_core-2.18.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4dd1aa6c7f3bea171d237a70abc105e3cda903c4ba95aea82bec11e59d45833e"}, - {file = "pydantic_core-2.18.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1cfb9b1879cbf5a87b1b3be76ae312866b96adbc6b5c55c5e9a3934f1c0d242f"}, - 
{file = "pydantic_core-2.18.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aff8b042ce90ec873d7dd97302cadeac9768c0e536cf2452ee34e1c50a9e466d"}, - {file = "pydantic_core-2.18.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d0a30b40b76306b58e951e2eaaafdd94292df188efe33c72fd1f503a1ea375a"}, - {file = "pydantic_core-2.18.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e1065cb8c9c14ea6a8c76c7c113b4d8173be2dca984c5a3ab0d6ce364ea8b502"}, - {file = "pydantic_core-2.18.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b2772b00d0d1a0e2bfe463526f23637dbc8c7fa3c80c43bca66fe4312406412a"}, - {file = "pydantic_core-2.18.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ef52699c236366c4b18b485e9eecc3e5f215ef89b08e3e02a3a16a5abc97a69c"}, - {file = "pydantic_core-2.18.0-cp39-none-win32.whl", hash = "sha256:68b0ea179fc4ca681c651f272a9d0d42ad2a6e352f3d431c3cfba490719e40a0"}, - {file = "pydantic_core-2.18.0-cp39-none-win_amd64.whl", hash = "sha256:25b94e99e7aee8760c62a22e1dae2946318d2c44bdeb9be5f23ae1433cd6ba0f"}, - {file = "pydantic_core-2.18.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2269c1c4ab33b1cf091da878fbb739d00027649394c6c4e95a10faf5efec12b5"}, - {file = "pydantic_core-2.18.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:59f6a4444daed0265773ad6fed1495e212bb3b8e1157957b67505aa772645674"}, - {file = "pydantic_core-2.18.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ceb5a387c50d751dd25e677b5928b57ba69ee4151657369e3ead1664e12a02a"}, - {file = "pydantic_core-2.18.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9d9a17bdcf50312d3775bb60fe3c2f4b0fd5443b2705af58e491466fde291e3"}, - {file = "pydantic_core-2.18.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3a91c26005f4950d09380c82fe12b7014ca56dbc4d32e4f5a3ca5d8879d68170"}, - {file = 
"pydantic_core-2.18.0-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:057cb50ccdcbeef19e517cfa4ac8be8b3220dcee153770bb52d266c219e1c3d3"}, - {file = "pydantic_core-2.18.0-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:70696bf40bb588f5d62b0e79fde72d432e909551c3f2f3bfcb1674d7cacc7007"}, - {file = "pydantic_core-2.18.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:8a6d93401b503a54a4ce5ddc9ccd6f5b89b271b1fe0c72fc4428443b2451d765"}, - {file = "pydantic_core-2.18.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b496cab9ac75c8e7bda7d17e8a2d0db2f610dcced5ef465ef19122a17245b0f8"}, - {file = "pydantic_core-2.18.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:6d5c13ee3a9052f4ca8e7dd65dac9749c503dd96974ed1f908e0b933b9c689be"}, - {file = "pydantic_core-2.18.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0d86a800371db0590804881346b8610bd62c5f5396d544da5ae814a863a9e1b"}, - {file = "pydantic_core-2.18.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6598ed75a1ac49784a042af54cf2db3febfa2642717b12abaf6745339f69b5d7"}, - {file = "pydantic_core-2.18.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8ccf3e031d2dadf999d78d543d9ec9ce9fef40ae8a3c3a5a35041709d734d0d2"}, - {file = "pydantic_core-2.18.0-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:324018576490157103965281df89d287cbf18415fb3fcbb0a66efa23f2b5a497"}, - {file = "pydantic_core-2.18.0-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:5b109e4a7828b7cd5fa7bb63c6125203711298d0b1f1b83d0f9786c7ce3d689b"}, - {file = "pydantic_core-2.18.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:b770ae0d064a2d858f68c933217e01ea372de25685a52b4e98b26ea5684811c0"}, - {file = "pydantic_core-2.18.0.tar.gz", hash = "sha256:a6d075404af8b8feb42f86196e08053bfae282af2701321f36a1553e966ce1f0"}, + {file = "pydantic_core-2.18.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = 
"sha256:ee9cf33e7fe14243f5ca6977658eb7d1042caaa66847daacbd2117adb258b226"}, + {file = "pydantic_core-2.18.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6b7bbb97d82659ac8b37450c60ff2e9f97e4eb0f8a8a3645a5568b9334b08b50"}, + {file = "pydantic_core-2.18.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df4249b579e75094f7e9bb4bd28231acf55e308bf686b952f43100a5a0be394c"}, + {file = "pydantic_core-2.18.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d0491006a6ad20507aec2be72e7831a42efc93193d2402018007ff827dc62926"}, + {file = "pydantic_core-2.18.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2ae80f72bb7a3e397ab37b53a2b49c62cc5496412e71bc4f1277620a7ce3f52b"}, + {file = "pydantic_core-2.18.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:58aca931bef83217fca7a390e0486ae327c4af9c3e941adb75f8772f8eeb03a1"}, + {file = "pydantic_core-2.18.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1be91ad664fc9245404a789d60cba1e91c26b1454ba136d2a1bf0c2ac0c0505a"}, + {file = "pydantic_core-2.18.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:667880321e916a8920ef49f5d50e7983792cf59f3b6079f3c9dac2b88a311d17"}, + {file = "pydantic_core-2.18.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:f7054fdc556f5421f01e39cbb767d5ec5c1139ea98c3e5b350e02e62201740c7"}, + {file = "pydantic_core-2.18.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:030e4f9516f9947f38179249778709a460a3adb516bf39b5eb9066fcfe43d0e6"}, + {file = "pydantic_core-2.18.1-cp310-none-win32.whl", hash = "sha256:2e91711e36e229978d92642bfc3546333a9127ecebb3f2761372e096395fc649"}, + {file = "pydantic_core-2.18.1-cp310-none-win_amd64.whl", hash = "sha256:9a29726f91c6cb390b3c2338f0df5cd3e216ad7a938762d11c994bb37552edb0"}, + {file = "pydantic_core-2.18.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = 
"sha256:9ece8a49696669d483d206b4474c367852c44815fca23ac4e48b72b339807f80"}, + {file = "pydantic_core-2.18.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7a5d83efc109ceddb99abd2c1316298ced2adb4570410defe766851a804fcd5b"}, + {file = "pydantic_core-2.18.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f7973c381283783cd1043a8c8f61ea5ce7a3a58b0369f0ee0ee975eaf2f2a1b"}, + {file = "pydantic_core-2.18.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:54c7375c62190a7845091f521add19b0f026bcf6ae674bdb89f296972272e86d"}, + {file = "pydantic_core-2.18.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dd63cec4e26e790b70544ae5cc48d11b515b09e05fdd5eff12e3195f54b8a586"}, + {file = "pydantic_core-2.18.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:561cf62c8a3498406495cfc49eee086ed2bb186d08bcc65812b75fda42c38294"}, + {file = "pydantic_core-2.18.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68717c38a68e37af87c4da20e08f3e27d7e4212e99e96c3d875fbf3f4812abfc"}, + {file = "pydantic_core-2.18.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2d5728e93d28a3c63ee513d9ffbac9c5989de8c76e049dbcb5bfe4b923a9739d"}, + {file = "pydantic_core-2.18.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f0f17814c505f07806e22b28856c59ac80cee7dd0fbb152aed273e116378f519"}, + {file = "pydantic_core-2.18.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d816f44a51ba5175394bc6c7879ca0bd2be560b2c9e9f3411ef3a4cbe644c2e9"}, + {file = "pydantic_core-2.18.1-cp311-none-win32.whl", hash = "sha256:09f03dfc0ef8c22622eaa8608caa4a1e189cfb83ce847045eca34f690895eccb"}, + {file = "pydantic_core-2.18.1-cp311-none-win_amd64.whl", hash = "sha256:27f1009dc292f3b7ca77feb3571c537276b9aad5dd4efb471ac88a8bd09024e9"}, + {file = "pydantic_core-2.18.1-cp311-none-win_arm64.whl", hash = 
"sha256:48dd883db92e92519201f2b01cafa881e5f7125666141a49ffba8b9facc072b0"}, + {file = "pydantic_core-2.18.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:b6b0e4912030c6f28bcb72b9ebe4989d6dc2eebcd2a9cdc35fefc38052dd4fe8"}, + {file = "pydantic_core-2.18.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f3202a429fe825b699c57892d4371c74cc3456d8d71b7f35d6028c96dfecad31"}, + {file = "pydantic_core-2.18.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3982b0a32d0a88b3907e4b0dc36809fda477f0757c59a505d4e9b455f384b8b"}, + {file = "pydantic_core-2.18.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25595ac311f20e5324d1941909b0d12933f1fd2171075fcff763e90f43e92a0d"}, + {file = "pydantic_core-2.18.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:14fe73881cf8e4cbdaded8ca0aa671635b597e42447fec7060d0868b52d074e6"}, + {file = "pydantic_core-2.18.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ca976884ce34070799e4dfc6fbd68cb1d181db1eefe4a3a94798ddfb34b8867f"}, + {file = "pydantic_core-2.18.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:684d840d2c9ec5de9cb397fcb3f36d5ebb6fa0d94734f9886032dd796c1ead06"}, + {file = "pydantic_core-2.18.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:54764c083bbe0264f0f746cefcded6cb08fbbaaf1ad1d78fb8a4c30cff999a90"}, + {file = "pydantic_core-2.18.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:201713f2f462e5c015b343e86e68bd8a530a4f76609b33d8f0ec65d2b921712a"}, + {file = "pydantic_core-2.18.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:fd1a9edb9dd9d79fbeac1ea1f9a8dd527a6113b18d2e9bcc0d541d308dae639b"}, + {file = "pydantic_core-2.18.1-cp312-none-win32.whl", hash = "sha256:d5e6b7155b8197b329dc787356cfd2684c9d6a6b1a197f6bbf45f5555a98d411"}, + {file = "pydantic_core-2.18.1-cp312-none-win_amd64.whl", hash = 
"sha256:9376d83d686ec62e8b19c0ac3bf8d28d8a5981d0df290196fb6ef24d8a26f0d6"}, + {file = "pydantic_core-2.18.1-cp312-none-win_arm64.whl", hash = "sha256:c562b49c96906b4029b5685075fe1ebd3b5cc2601dfa0b9e16c2c09d6cbce048"}, + {file = "pydantic_core-2.18.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:3e352f0191d99fe617371096845070dee295444979efb8f27ad941227de6ad09"}, + {file = "pydantic_core-2.18.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c0295d52b012cbe0d3059b1dba99159c3be55e632aae1999ab74ae2bd86a33d7"}, + {file = "pydantic_core-2.18.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56823a92075780582d1ffd4489a2e61d56fd3ebb4b40b713d63f96dd92d28144"}, + {file = "pydantic_core-2.18.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dd3f79e17b56741b5177bcc36307750d50ea0698df6aa82f69c7db32d968c1c2"}, + {file = "pydantic_core-2.18.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:38a5024de321d672a132b1834a66eeb7931959c59964b777e8f32dbe9523f6b1"}, + {file = "pydantic_core-2.18.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d2ce426ee691319d4767748c8e0895cfc56593d725594e415f274059bcf3cb76"}, + {file = "pydantic_core-2.18.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2adaeea59849ec0939af5c5d476935f2bab4b7f0335b0110f0f069a41024278e"}, + {file = "pydantic_core-2.18.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9b6431559676a1079eac0f52d6d0721fb8e3c5ba43c37bc537c8c83724031feb"}, + {file = "pydantic_core-2.18.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:85233abb44bc18d16e72dc05bf13848a36f363f83757541f1a97db2f8d58cfd9"}, + {file = "pydantic_core-2.18.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:641a018af4fe48be57a2b3d7a1f0f5dbca07c1d00951d3d7463f0ac9dac66622"}, + {file = "pydantic_core-2.18.1-cp38-none-win32.whl", hash = "sha256:63d7523cd95d2fde0d28dc42968ac731b5bb1e516cc56b93a50ab293f4daeaad"}, + 
{file = "pydantic_core-2.18.1-cp38-none-win_amd64.whl", hash = "sha256:907a4d7720abfcb1c81619863efd47c8a85d26a257a2dbebdb87c3b847df0278"}, + {file = "pydantic_core-2.18.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:aad17e462f42ddbef5984d70c40bfc4146c322a2da79715932cd8976317054de"}, + {file = "pydantic_core-2.18.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:94b9769ba435b598b547c762184bcfc4783d0d4c7771b04a3b45775c3589ca44"}, + {file = "pydantic_core-2.18.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:80e0e57cc704a52fb1b48f16d5b2c8818da087dbee6f98d9bf19546930dc64b5"}, + {file = "pydantic_core-2.18.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:76b86e24039c35280ceee6dce7e62945eb93a5175d43689ba98360ab31eebc4a"}, + {file = "pydantic_core-2.18.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:12a05db5013ec0ca4a32cc6433f53faa2a014ec364031408540ba858c2172bb0"}, + {file = "pydantic_core-2.18.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:250ae39445cb5475e483a36b1061af1bc233de3e9ad0f4f76a71b66231b07f88"}, + {file = "pydantic_core-2.18.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a32204489259786a923e02990249c65b0f17235073149d0033efcebe80095570"}, + {file = "pydantic_core-2.18.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6395a4435fa26519fd96fdccb77e9d00ddae9dd6c742309bd0b5610609ad7fb2"}, + {file = "pydantic_core-2.18.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2533ad2883f001efa72f3d0e733fb846710c3af6dcdd544fe5bf14fa5fe2d7db"}, + {file = "pydantic_core-2.18.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b560b72ed4816aee52783c66854d96157fd8175631f01ef58e894cc57c84f0f6"}, + {file = "pydantic_core-2.18.1-cp39-none-win32.whl", hash = "sha256:582cf2cead97c9e382a7f4d3b744cf0ef1a6e815e44d3aa81af3ad98762f5a9b"}, + {file = "pydantic_core-2.18.1-cp39-none-win_amd64.whl", hash = 
"sha256:ca71d501629d1fa50ea7fa3b08ba884fe10cefc559f5c6c8dfe9036c16e8ae89"}, + {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e178e5b66a06ec5bf51668ec0d4ac8cfb2bdcb553b2c207d58148340efd00143"}, + {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:72722ce529a76a4637a60be18bd789d8fb871e84472490ed7ddff62d5fed620d"}, + {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2fe0c1ce5b129455e43f941f7a46f61f3d3861e571f2905d55cdbb8b5c6f5e2c"}, + {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4284c621f06a72ce2cb55f74ea3150113d926a6eb78ab38340c08f770eb9b4d"}, + {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1a0c3e718f4e064efde68092d9d974e39572c14e56726ecfaeebbe6544521f47"}, + {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:2027493cc44c23b598cfaf200936110433d9caa84e2c6cf487a83999638a96ac"}, + {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:76909849d1a6bffa5a07742294f3fa1d357dc917cb1fe7b470afbc3a7579d539"}, + {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ee7ccc7fb7e921d767f853b47814c3048c7de536663e82fbc37f5eb0d532224b"}, + {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ee2794111c188548a4547eccc73a6a8527fe2af6cf25e1a4ebda2fd01cdd2e60"}, + {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:a139fe9f298dc097349fb4f28c8b81cc7a202dbfba66af0e14be5cfca4ef7ce5"}, + {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d074b07a10c391fc5bbdcb37b2f16f20fcd9e51e10d01652ab298c0d07908ee2"}, + {file = 
"pydantic_core-2.18.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c69567ddbac186e8c0aadc1f324a60a564cfe25e43ef2ce81bcc4b8c3abffbae"}, + {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:baf1c7b78cddb5af00971ad5294a4583188bda1495b13760d9f03c9483bb6203"}, + {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:2684a94fdfd1b146ff10689c6e4e815f6a01141781c493b97342cdc5b06f4d5d"}, + {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:73c1bc8a86a5c9e8721a088df234265317692d0b5cd9e86e975ce3bc3db62a59"}, + {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:e60defc3c15defb70bb38dd605ff7e0fae5f6c9c7cbfe0ad7868582cb7e844a6"}, + {file = "pydantic_core-2.18.1.tar.gz", hash = "sha256:de9d3e8717560eb05e28739d1b35e4eac2e458553a52a301e51352a7ffc86a35"}, ] [package.dependencies] @@ -1195,72 +1195,72 @@ gitlab = ["python-gitlab (>=1.3.0)"] [[package]] name = "setuptools" -version = "69.2.0" +version = "69.5.1" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "setuptools-69.2.0-py3-none-any.whl", hash = "sha256:c21c49fb1042386df081cb5d86759792ab89efca84cf114889191cd09aacc80c"}, - {file = "setuptools-69.2.0.tar.gz", hash = "sha256:0ff4183f8f42cd8fa3acea16c45205521a4ef28f73c6391d8a25e92893134f2e"}, + {file = "setuptools-69.5.1-py3-none-any.whl", hash = "sha256:c636ac361bc47580504644275c9ad802c50415c7522212252c033bd15f301f32"}, + {file = "setuptools-69.5.1.tar.gz", hash = "sha256:6c1fccdac05a97e598fb0ae3bbed5904ccb317337a51139dcd51453611bbb987"}, ] [package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", 
"sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] -testing = ["build[virtualenv]", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "mypy (==1.9)", "packaging (>=23.2)", "pip (>=19.1)", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff (>=0.2.1)", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +testing = ["build[virtualenv]", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "mypy (==1.9)", "packaging (>=23.2)", "pip (>=19.1)", "pytest (>=6,!=8.1.1)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (>=0.2.1)", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.2)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] [[package]] name = "shapely" -version = "2.0.3" +version = "2.0.4" description = "Manipulation and analysis of geometric objects" optional = false python-versions = ">=3.7" files = [ - {file = "shapely-2.0.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:af7e9abe180b189431b0f490638281b43b84a33a960620e6b2e8d3e3458b61a1"}, - {file = 
"shapely-2.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:98040462b36ced9671e266b95c326b97f41290d9d17504a1ee4dc313a7667b9c"}, - {file = "shapely-2.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:71eb736ef2843f23473c6e37f6180f90f0a35d740ab284321548edf4e55d9a52"}, - {file = "shapely-2.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:881eb9dbbb4a6419667e91fcb20313bfc1e67f53dbb392c6840ff04793571ed1"}, - {file = "shapely-2.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f10d2ccf0554fc0e39fad5886c839e47e207f99fdf09547bc687a2330efda35b"}, - {file = "shapely-2.0.3-cp310-cp310-win32.whl", hash = "sha256:6dfdc077a6fcaf74d3eab23a1ace5abc50c8bce56ac7747d25eab582c5a2990e"}, - {file = "shapely-2.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:64c5013dacd2d81b3bb12672098a0b2795c1bf8190cfc2980e380f5ef9d9e4d9"}, - {file = "shapely-2.0.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:56cee3e4e8159d6f2ce32e421445b8e23154fd02a0ac271d6a6c0b266a8e3cce"}, - {file = "shapely-2.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:619232c8276fded09527d2a9fd91a7885ff95c0ff9ecd5e3cb1e34fbb676e2ae"}, - {file = "shapely-2.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b2a7d256db6f5b4b407dc0c98dd1b2fcf1c9c5814af9416e5498d0a2e4307a4b"}, - {file = "shapely-2.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e45f0c8cd4583647db3216d965d49363e6548c300c23fd7e57ce17a03f824034"}, - {file = "shapely-2.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13cb37d3826972a82748a450328fe02a931dcaed10e69a4d83cc20ba021bc85f"}, - {file = "shapely-2.0.3-cp311-cp311-win32.whl", hash = "sha256:9302d7011e3e376d25acd30d2d9e70d315d93f03cc748784af19b00988fc30b1"}, - {file = "shapely-2.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:6b464f2666b13902835f201f50e835f2f153f37741db88f68c7f3b932d3505fa"}, - {file = "shapely-2.0.3-cp312-cp312-macosx_10_9_universal2.whl", 
hash = "sha256:e86e7cb8e331a4850e0c2a8b2d66dc08d7a7b301b8d1d34a13060e3a5b4b3b55"}, - {file = "shapely-2.0.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c91981c99ade980fc49e41a544629751a0ccd769f39794ae913e53b07b2f78b9"}, - {file = "shapely-2.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bd45d456983dc60a42c4db437496d3f08a4201fbf662b69779f535eb969660af"}, - {file = "shapely-2.0.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:882fb1ffc7577e88c1194f4f1757e277dc484ba096a3b94844319873d14b0f2d"}, - {file = "shapely-2.0.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9f2d93bff2ea52fa93245798cddb479766a18510ea9b93a4fb9755c79474889"}, - {file = "shapely-2.0.3-cp312-cp312-win32.whl", hash = "sha256:99abad1fd1303b35d991703432c9481e3242b7b3a393c186cfb02373bf604004"}, - {file = "shapely-2.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:6f555fe3304a1f40398977789bc4fe3c28a11173196df9ece1e15c5bc75a48db"}, - {file = "shapely-2.0.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a983cc418c1fa160b7d797cfef0e0c9f8c6d5871e83eae2c5793fce6a837fad9"}, - {file = "shapely-2.0.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18bddb8c327f392189a8d5d6b9a858945722d0bb95ccbd6a077b8e8fc4c7890d"}, - {file = "shapely-2.0.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:442f4dcf1eb58c5a4e3428d88e988ae153f97ab69a9f24e07bf4af8038536325"}, - {file = "shapely-2.0.3-cp37-cp37m-win32.whl", hash = "sha256:31a40b6e3ab00a4fd3a1d44efb2482278642572b8e0451abdc8e0634b787173e"}, - {file = "shapely-2.0.3-cp37-cp37m-win_amd64.whl", hash = "sha256:59b16976c2473fec85ce65cc9239bef97d4205ab3acead4e6cdcc72aee535679"}, - {file = "shapely-2.0.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:705efbce1950a31a55b1daa9c6ae1c34f1296de71ca8427974ec2f27d57554e3"}, - {file = "shapely-2.0.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:601c5c0058a6192df704cb889439f64994708563f57f99574798721e9777a44b"}, - {file = "shapely-2.0.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f24ecbb90a45c962b3b60d8d9a387272ed50dc010bfe605f1d16dfc94772d8a1"}, - {file = "shapely-2.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8c2a2989222c6062f7a0656e16276c01bb308bc7e5d999e54bf4e294ce62e76"}, - {file = "shapely-2.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42bceb9bceb3710a774ce04908fda0f28b291323da2688f928b3f213373b5aee"}, - {file = "shapely-2.0.3-cp38-cp38-win32.whl", hash = "sha256:54d925c9a311e4d109ec25f6a54a8bd92cc03481a34ae1a6a92c1fe6729b7e01"}, - {file = "shapely-2.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:300d203b480a4589adefff4c4af0b13919cd6d760ba3cbb1e56275210f96f654"}, - {file = "shapely-2.0.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:083d026e97b6c1f4a9bd2a9171c7692461092ed5375218170d91705550eecfd5"}, - {file = "shapely-2.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:27b6e1910094d93e9627f2664121e0e35613262fc037051680a08270f6058daf"}, - {file = "shapely-2.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:71b2de56a9e8c0e5920ae5ddb23b923490557ac50cb0b7fa752761bf4851acde"}, - {file = "shapely-2.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d279e56bbb68d218d63f3efc80c819cedcceef0e64efbf058a1df89dc57201b"}, - {file = "shapely-2.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88566d01a30f0453f7d038db46bc83ce125e38e47c5f6bfd4c9c287010e9bf74"}, - {file = "shapely-2.0.3-cp39-cp39-win32.whl", hash = "sha256:58afbba12c42c6ed44c4270bc0e22f3dadff5656d711b0ad335c315e02d04707"}, - {file = "shapely-2.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:5026b30433a70911979d390009261b8c4021ff87c7c3cbd825e62bb2ffa181bc"}, - {file = "shapely-2.0.3.tar.gz", hash = "sha256:4d65d0aa7910af71efa72fd6447e02a8e5dd44da81a983de9d736d6e6ccbe674"}, + {file = 
"shapely-2.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:011b77153906030b795791f2fdfa2d68f1a8d7e40bce78b029782ade3afe4f2f"}, + {file = "shapely-2.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9831816a5d34d5170aa9ed32a64982c3d6f4332e7ecfe62dc97767e163cb0b17"}, + {file = "shapely-2.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5c4849916f71dc44e19ed370421518c0d86cf73b26e8656192fcfcda08218fbd"}, + {file = "shapely-2.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:841f93a0e31e4c64d62ea570d81c35de0f6cea224568b2430d832967536308e6"}, + {file = "shapely-2.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b4431f522b277c79c34b65da128029a9955e4481462cbf7ebec23aab61fc58"}, + {file = "shapely-2.0.4-cp310-cp310-win32.whl", hash = "sha256:92a41d936f7d6743f343be265ace93b7c57f5b231e21b9605716f5a47c2879e7"}, + {file = "shapely-2.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:30982f79f21bb0ff7d7d4a4e531e3fcaa39b778584c2ce81a147f95be1cd58c9"}, + {file = "shapely-2.0.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:de0205cb21ad5ddaef607cda9a3191eadd1e7a62a756ea3a356369675230ac35"}, + {file = "shapely-2.0.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7d56ce3e2a6a556b59a288771cf9d091470116867e578bebced8bfc4147fbfd7"}, + {file = "shapely-2.0.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:58b0ecc505bbe49a99551eea3f2e8a9b3b24b3edd2a4de1ac0dc17bc75c9ec07"}, + {file = "shapely-2.0.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:790a168a808bd00ee42786b8ba883307c0e3684ebb292e0e20009588c426da47"}, + {file = "shapely-2.0.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4310b5494271e18580d61022c0857eb85d30510d88606fa3b8314790df7f367d"}, + {file = "shapely-2.0.4-cp311-cp311-win32.whl", hash = "sha256:63f3a80daf4f867bd80f5c97fbe03314348ac1b3b70fb1c0ad255a69e3749879"}, + {file = "shapely-2.0.4-cp311-cp311-win_amd64.whl", 
hash = "sha256:c52ed79f683f721b69a10fb9e3d940a468203f5054927215586c5d49a072de8d"}, + {file = "shapely-2.0.4-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:5bbd974193e2cc274312da16b189b38f5f128410f3377721cadb76b1e8ca5328"}, + {file = "shapely-2.0.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:41388321a73ba1a84edd90d86ecc8bfed55e6a1e51882eafb019f45895ec0f65"}, + {file = "shapely-2.0.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0776c92d584f72f1e584d2e43cfc5542c2f3dd19d53f70df0900fda643f4bae6"}, + {file = "shapely-2.0.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c75c98380b1ede1cae9a252c6dc247e6279403fae38c77060a5e6186c95073ac"}, + {file = "shapely-2.0.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3e700abf4a37b7b8b90532fa6ed5c38a9bfc777098bc9fbae5ec8e618ac8f30"}, + {file = "shapely-2.0.4-cp312-cp312-win32.whl", hash = "sha256:4f2ab0faf8188b9f99e6a273b24b97662194160cc8ca17cf9d1fb6f18d7fb93f"}, + {file = "shapely-2.0.4-cp312-cp312-win_amd64.whl", hash = "sha256:03152442d311a5e85ac73b39680dd64a9892fa42bb08fd83b3bab4fe6999bfa0"}, + {file = "shapely-2.0.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:994c244e004bc3cfbea96257b883c90a86e8cbd76e069718eb4c6b222a56f78b"}, + {file = "shapely-2.0.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05ffd6491e9e8958b742b0e2e7c346635033d0a5f1a0ea083547fcc854e5d5cf"}, + {file = "shapely-2.0.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fbdc1140a7d08faa748256438291394967aa54b40009f54e8d9825e75ef6113"}, + {file = "shapely-2.0.4-cp37-cp37m-win32.whl", hash = "sha256:5af4cd0d8cf2912bd95f33586600cac9c4b7c5053a036422b97cfe4728d2eb53"}, + {file = "shapely-2.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:464157509ce4efa5ff285c646a38b49f8c5ef8d4b340f722685b09bb033c5ccf"}, + {file = "shapely-2.0.4-cp38-cp38-macosx_10_9_universal2.whl", hash = 
"sha256:489c19152ec1f0e5c5e525356bcbf7e532f311bff630c9b6bc2db6f04da6a8b9"}, + {file = "shapely-2.0.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b79bbd648664aa6f44ef018474ff958b6b296fed5c2d42db60078de3cffbc8aa"}, + {file = "shapely-2.0.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:674d7baf0015a6037d5758496d550fc1946f34bfc89c1bf247cabdc415d7747e"}, + {file = "shapely-2.0.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6cd4ccecc5ea5abd06deeaab52fcdba372f649728050c6143cc405ee0c166679"}, + {file = "shapely-2.0.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb5cdcbbe3080181498931b52a91a21a781a35dcb859da741c0345c6402bf00c"}, + {file = "shapely-2.0.4-cp38-cp38-win32.whl", hash = "sha256:55a38dcd1cee2f298d8c2ebc60fc7d39f3b4535684a1e9e2f39a80ae88b0cea7"}, + {file = "shapely-2.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:ec555c9d0db12d7fd777ba3f8b75044c73e576c720a851667432fabb7057da6c"}, + {file = "shapely-2.0.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3f9103abd1678cb1b5f7e8e1af565a652e036844166c91ec031eeb25c5ca8af0"}, + {file = "shapely-2.0.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:263bcf0c24d7a57c80991e64ab57cba7a3906e31d2e21b455f493d4aab534aaa"}, + {file = "shapely-2.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ddf4a9bfaac643e62702ed662afc36f6abed2a88a21270e891038f9a19bc08fc"}, + {file = "shapely-2.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:485246fcdb93336105c29a5cfbff8a226949db37b7473c89caa26c9bae52a242"}, + {file = "shapely-2.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8de4578e838a9409b5b134a18ee820730e507b2d21700c14b71a2b0757396acc"}, + {file = "shapely-2.0.4-cp39-cp39-win32.whl", hash = "sha256:9dab4c98acfb5fb85f5a20548b5c0abe9b163ad3525ee28822ffecb5c40e724c"}, + {file = "shapely-2.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:31c19a668b5a1eadab82ff070b5a260478ac6ddad3a5b62295095174a8d26398"}, + 
{file = "shapely-2.0.4.tar.gz", hash = "sha256:5dc736127fac70009b8d309a0eeb74f3e08979e530cf7017f2f507ef62e6cfb8"}, ] [package.dependencies] -numpy = ">=1.14,<2" +numpy = ">=1.14,<3" [package.extras] docs = ["matplotlib", "numpydoc (==1.1.*)", "sphinx", "sphinx-book-theme", "sphinx-remove-toctrees"] @@ -1364,13 +1364,13 @@ test = ["black (>=22.3.0,<23.0.0)", "coverage (>=6.2,<7.0)", "isort (>=5.0.6,<6. [[package]] name = "typing-extensions" -version = "4.10.0" +version = "4.11.0" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" files = [ - {file = "typing_extensions-4.10.0-py3-none-any.whl", hash = "sha256:69b1a937c3a517342112fb4c6df7e72fc39a38e7891a5730ed4985b5214b5475"}, - {file = "typing_extensions-4.10.0.tar.gz", hash = "sha256:b0abd7c89e8fb96f98db18d86106ff1d90ab692004eb746cf6eda2682f91b3cb"}, + {file = "typing_extensions-4.11.0-py3-none-any.whl", hash = "sha256:c1f94d72897edaf4ce775bb7558d5b79d8126906a14ea5ed1635921406c0387a"}, + {file = "typing_extensions-4.11.0.tar.gz", hash = "sha256:83f085bd5ca59c80295fc2a82ab5dac679cbe02b9f33f7d83af68e241bea51b0"}, ] [[package]] @@ -1392,13 +1392,13 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "virtualenv" -version = "20.25.1" +version = "20.25.3" description = "Virtual Python Environment builder" optional = false python-versions = ">=3.7" files = [ - {file = "virtualenv-20.25.1-py3-none-any.whl", hash = "sha256:961c026ac520bac5f69acb8ea063e8a4f071bcc9457b9c1f28f6b085c511583a"}, - {file = "virtualenv-20.25.1.tar.gz", hash = "sha256:e08e13ecdca7a0bd53798f356d5831434afa5b07b93f0abdf0797b7a06ffe197"}, + {file = "virtualenv-20.25.3-py3-none-any.whl", hash = "sha256:8aac4332f2ea6ef519c648d0bc48a5b1d324994753519919bddbb1aff25a104e"}, + {file = "virtualenv-20.25.3.tar.gz", hash = "sha256:7bb554bbdfeaacc3349fa614ea5bff6ac300fc7c335e9facf3a3bcfc703f45be"}, ] [package.dependencies] @@ -1407,7 +1407,7 @@ filelock = ">=3.12.2,<4" platformdirs = ">=3.9.1,<5" 
[package.extras] -docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] +docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2,!=7.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10)"] [metadata] From feb2ce0d12ac4b080709ed9ae8faf2b8aa491fc8 Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Wed, 17 Apr 2024 16:18:13 -0400 Subject: [PATCH 097/112] fix missing jsonschema dependency --- poetry.lock | 179 ++++++++++++++++++++++++++++++++++++++++++++++++- pyproject.toml | 1 + 2 files changed, 179 insertions(+), 1 deletion(-) diff --git a/poetry.lock b/poetry.lock index 98596dd..b563e0e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -11,6 +11,25 @@ files = [ {file = "annotated_types-0.6.0.tar.gz", hash = "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d"}, ] +[[package]] +name = "attrs" +version = "23.2.0" +description = "Classes Without Boilerplate" +optional = false +python-versions = ">=3.7" +files = [ + {file = "attrs-23.2.0-py3-none-any.whl", hash = "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"}, + {file = "attrs-23.2.0.tar.gz", hash = "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30"}, +] + +[package.extras] +cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] +dev = ["attrs[tests]", "pre-commit"] +docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] +tests = ["attrs[tests-no-zope]", "zope-interface"] +tests-mypy = ["mypy (>=1.6)", 
"pytest-mypy-plugins"] +tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"] + [[package]] name = "bandit" version = "1.7.8" @@ -355,6 +374,41 @@ files = [ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, ] +[[package]] +name = "jsonschema" +version = "4.21.1" +description = "An implementation of JSON Schema validation for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jsonschema-4.21.1-py3-none-any.whl", hash = "sha256:7996507afae316306f9e2290407761157c6f78002dcf7419acb99822143d1c6f"}, + {file = "jsonschema-4.21.1.tar.gz", hash = "sha256:85727c00279f5fa6bedbe6238d2aa6403bedd8b4864ab11207d07df3cc1b2ee5"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +jsonschema-specifications = ">=2023.03.6" +referencing = ">=0.28.4" +rpds-py = ">=0.7.1" + +[package.extras] +format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] +format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"] + +[[package]] +name = "jsonschema-specifications" +version = "2023.12.1" +description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jsonschema_specifications-2023.12.1-py3-none-any.whl", hash = "sha256:87e4fdf3a94858b8a2ba2778d9ba57d8a9cafca7c7489c46ba0d30a8bc6a9c3c"}, + {file = "jsonschema_specifications-2023.12.1.tar.gz", hash = "sha256:48a76787b3e70f5ed53f1160d2b81f586e4ca6d1548c5de7085d1682674764cc"}, +] + +[package.dependencies] +referencing = ">=0.31.0" + [[package]] name = "markdown-it-py" version = "3.0.0" @@ -1028,6 +1082,21 @@ files = [ {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, ] 
+[[package]] +name = "referencing" +version = "0.34.0" +description = "JSON Referencing + Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "referencing-0.34.0-py3-none-any.whl", hash = "sha256:d53ae300ceddd3169f1ffa9caf2cb7b769e92657e4fafb23d34b93679116dfd4"}, + {file = "referencing-0.34.0.tar.gz", hash = "sha256:5773bd84ef41799a5a8ca72dc34590c041eb01bf9aa02632b4a973fb0181a844"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +rpds-py = ">=0.7.0" + [[package]] name = "requests" version = "2.31.0" @@ -1067,6 +1136,114 @@ pygments = ">=2.13.0,<3.0.0" [package.extras] jupyter = ["ipywidgets (>=7.5.1,<9)"] +[[package]] +name = "rpds-py" +version = "0.18.0" +description = "Python bindings to Rust's persistent data structures (rpds)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "rpds_py-0.18.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:5b4e7d8d6c9b2e8ee2d55c90b59c707ca59bc30058269b3db7b1f8df5763557e"}, + {file = "rpds_py-0.18.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c463ed05f9dfb9baebef68048aed8dcdc94411e4bf3d33a39ba97e271624f8f7"}, + {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:01e36a39af54a30f28b73096dd39b6802eddd04c90dbe161c1b8dbe22353189f"}, + {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d62dec4976954a23d7f91f2f4530852b0c7608116c257833922a896101336c51"}, + {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dd18772815d5f008fa03d2b9a681ae38d5ae9f0e599f7dda233c439fcaa00d40"}, + {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:923d39efa3cfb7279a0327e337a7958bff00cc447fd07a25cddb0a1cc9a6d2da"}, + {file = "rpds_py-0.18.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39514da80f971362f9267c600b6d459bfbbc549cffc2cef8e47474fddc9b45b1"}, + {file = 
"rpds_py-0.18.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a34d557a42aa28bd5c48a023c570219ba2593bcbbb8dc1b98d8cf5d529ab1434"}, + {file = "rpds_py-0.18.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:93df1de2f7f7239dc9cc5a4a12408ee1598725036bd2dedadc14d94525192fc3"}, + {file = "rpds_py-0.18.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:34b18ba135c687f4dac449aa5157d36e2cbb7c03cbea4ddbd88604e076aa836e"}, + {file = "rpds_py-0.18.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c0b5dcf9193625afd8ecc92312d6ed78781c46ecbf39af9ad4681fc9f464af88"}, + {file = "rpds_py-0.18.0-cp310-none-win32.whl", hash = "sha256:c4325ff0442a12113a6379af66978c3fe562f846763287ef66bdc1d57925d337"}, + {file = "rpds_py-0.18.0-cp310-none-win_amd64.whl", hash = "sha256:7223a2a5fe0d217e60a60cdae28d6949140dde9c3bcc714063c5b463065e3d66"}, + {file = "rpds_py-0.18.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:3a96e0c6a41dcdba3a0a581bbf6c44bb863f27c541547fb4b9711fd8cf0ffad4"}, + {file = "rpds_py-0.18.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30f43887bbae0d49113cbaab729a112251a940e9b274536613097ab8b4899cf6"}, + {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fcb25daa9219b4cf3a0ab24b0eb9a5cc8949ed4dc72acb8fa16b7e1681aa3c58"}, + {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d68c93e381010662ab873fea609bf6c0f428b6d0bb00f2c6939782e0818d37bf"}, + {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b34b7aa8b261c1dbf7720b5d6f01f38243e9b9daf7e6b8bc1fd4657000062f2c"}, + {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2e6d75ab12b0bbab7215e5d40f1e5b738aa539598db27ef83b2ec46747df90e1"}, + {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:0b8612cd233543a3781bc659c731b9d607de65890085098986dfd573fc2befe5"}, + {file = "rpds_py-0.18.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:aec493917dd45e3c69d00a8874e7cbed844efd935595ef78a0f25f14312e33c6"}, + {file = "rpds_py-0.18.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:661d25cbffaf8cc42e971dd570d87cb29a665f49f4abe1f9e76be9a5182c4688"}, + {file = "rpds_py-0.18.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1df3659d26f539ac74fb3b0c481cdf9d725386e3552c6fa2974f4d33d78e544b"}, + {file = "rpds_py-0.18.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a1ce3ba137ed54f83e56fb983a5859a27d43a40188ba798993812fed73c70836"}, + {file = "rpds_py-0.18.0-cp311-none-win32.whl", hash = "sha256:69e64831e22a6b377772e7fb337533c365085b31619005802a79242fee620bc1"}, + {file = "rpds_py-0.18.0-cp311-none-win_amd64.whl", hash = "sha256:998e33ad22dc7ec7e030b3df701c43630b5bc0d8fbc2267653577e3fec279afa"}, + {file = "rpds_py-0.18.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:7f2facbd386dd60cbbf1a794181e6aa0bd429bd78bfdf775436020172e2a23f0"}, + {file = "rpds_py-0.18.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1d9a5be316c15ffb2b3c405c4ff14448c36b4435be062a7f578ccd8b01f0c4d8"}, + {file = "rpds_py-0.18.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd5bf1af8efe569654bbef5a3e0a56eca45f87cfcffab31dd8dde70da5982475"}, + {file = "rpds_py-0.18.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5417558f6887e9b6b65b4527232553c139b57ec42c64570569b155262ac0754f"}, + {file = "rpds_py-0.18.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:56a737287efecafc16f6d067c2ea0117abadcd078d58721f967952db329a3e5c"}, + {file = "rpds_py-0.18.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8f03bccbd8586e9dd37219bce4d4e0d3ab492e6b3b533e973fa08a112cb2ffc9"}, + {file = 
"rpds_py-0.18.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4457a94da0d5c53dc4b3e4de1158bdab077db23c53232f37a3cb7afdb053a4e3"}, + {file = "rpds_py-0.18.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0ab39c1ba9023914297dd88ec3b3b3c3f33671baeb6acf82ad7ce883f6e8e157"}, + {file = "rpds_py-0.18.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9d54553c1136b50fd12cc17e5b11ad07374c316df307e4cfd6441bea5fb68496"}, + {file = "rpds_py-0.18.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0af039631b6de0397ab2ba16eaf2872e9f8fca391b44d3d8cac317860a700a3f"}, + {file = "rpds_py-0.18.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:84ffab12db93b5f6bad84c712c92060a2d321b35c3c9960b43d08d0f639d60d7"}, + {file = "rpds_py-0.18.0-cp312-none-win32.whl", hash = "sha256:685537e07897f173abcf67258bee3c05c374fa6fff89d4c7e42fb391b0605e98"}, + {file = "rpds_py-0.18.0-cp312-none-win_amd64.whl", hash = "sha256:e003b002ec72c8d5a3e3da2989c7d6065b47d9eaa70cd8808b5384fbb970f4ec"}, + {file = "rpds_py-0.18.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:08f9ad53c3f31dfb4baa00da22f1e862900f45908383c062c27628754af2e88e"}, + {file = "rpds_py-0.18.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c0013fe6b46aa496a6749c77e00a3eb07952832ad6166bd481c74bda0dcb6d58"}, + {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e32a92116d4f2a80b629778280103d2a510a5b3f6314ceccd6e38006b5e92dcb"}, + {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e541ec6f2ec456934fd279a3120f856cd0aedd209fc3852eca563f81738f6861"}, + {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bed88b9a458e354014d662d47e7a5baafd7ff81c780fd91584a10d6ec842cb73"}, + {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2644e47de560eb7bd55c20fc59f6daa04682655c58d08185a9b95c1970fa1e07"}, + 
{file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e8916ae4c720529e18afa0b879473049e95949bf97042e938530e072fde061d"}, + {file = "rpds_py-0.18.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:465a3eb5659338cf2a9243e50ad9b2296fa15061736d6e26240e713522b6235c"}, + {file = "rpds_py-0.18.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ea7d4a99f3b38c37eac212dbd6ec42b7a5ec51e2c74b5d3223e43c811609e65f"}, + {file = "rpds_py-0.18.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:67071a6171e92b6da534b8ae326505f7c18022c6f19072a81dcf40db2638767c"}, + {file = "rpds_py-0.18.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:41ef53e7c58aa4ef281da975f62c258950f54b76ec8e45941e93a3d1d8580594"}, + {file = "rpds_py-0.18.0-cp38-none-win32.whl", hash = "sha256:fdea4952db2793c4ad0bdccd27c1d8fdd1423a92f04598bc39425bcc2b8ee46e"}, + {file = "rpds_py-0.18.0-cp38-none-win_amd64.whl", hash = "sha256:7cd863afe7336c62ec78d7d1349a2f34c007a3cc6c2369d667c65aeec412a5b1"}, + {file = "rpds_py-0.18.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:5307def11a35f5ae4581a0b658b0af8178c65c530e94893345bebf41cc139d33"}, + {file = "rpds_py-0.18.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:77f195baa60a54ef9d2de16fbbfd3ff8b04edc0c0140a761b56c267ac11aa467"}, + {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39f5441553f1c2aed4de4377178ad8ff8f9d733723d6c66d983d75341de265ab"}, + {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9a00312dea9310d4cb7dbd7787e722d2e86a95c2db92fbd7d0155f97127bcb40"}, + {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8f2fc11e8fe034ee3c34d316d0ad8808f45bc3b9ce5857ff29d513f3ff2923a1"}, + {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:586f8204935b9ec884500498ccc91aa869fc652c40c093bd9e1471fbcc25c022"}, + {file 
= "rpds_py-0.18.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ddc2f4dfd396c7bfa18e6ce371cba60e4cf9d2e5cdb71376aa2da264605b60b9"}, + {file = "rpds_py-0.18.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5ddcba87675b6d509139d1b521e0c8250e967e63b5909a7e8f8944d0f90ff36f"}, + {file = "rpds_py-0.18.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:7bd339195d84439cbe5771546fe8a4e8a7a045417d8f9de9a368c434e42a721e"}, + {file = "rpds_py-0.18.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:d7c36232a90d4755b720fbd76739d8891732b18cf240a9c645d75f00639a9024"}, + {file = "rpds_py-0.18.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:6b0817e34942b2ca527b0e9298373e7cc75f429e8da2055607f4931fded23e20"}, + {file = "rpds_py-0.18.0-cp39-none-win32.whl", hash = "sha256:99f70b740dc04d09e6b2699b675874367885217a2e9f782bdf5395632ac663b7"}, + {file = "rpds_py-0.18.0-cp39-none-win_amd64.whl", hash = "sha256:6ef687afab047554a2d366e112dd187b62d261d49eb79b77e386f94644363294"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ad36cfb355e24f1bd37cac88c112cd7730873f20fb0bdaf8ba59eedf8216079f"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:36b3ee798c58ace201289024b52788161e1ea133e4ac93fba7d49da5fec0ef9e"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8a2f084546cc59ea99fda8e070be2fd140c3092dc11524a71aa8f0f3d5a55ca"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e4461d0f003a0aa9be2bdd1b798a041f177189c1a0f7619fe8c95ad08d9a45d7"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8db715ebe3bb7d86d77ac1826f7d67ec11a70dbd2376b7cc214199360517b641"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:793968759cd0d96cac1e367afd70c235867831983f876a53389ad869b043c948"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:66e6a3af5a75363d2c9a48b07cb27c4ea542938b1a2e93b15a503cdfa8490795"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6ef0befbb5d79cf32d0266f5cff01545602344eda89480e1dd88aca964260b18"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:1d4acf42190d449d5e89654d5c1ed3a4f17925eec71f05e2a41414689cda02d1"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:a5f446dd5055667aabaee78487f2b5ab72e244f9bc0b2ffebfeec79051679984"}, + {file = "rpds_py-0.18.0-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:9dbbeb27f4e70bfd9eec1be5477517365afe05a9b2c441a0b21929ee61048124"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:22806714311a69fd0af9b35b7be97c18a0fc2826e6827dbb3a8c94eac6cf7eeb"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:b34ae4636dfc4e76a438ab826a0d1eed2589ca7d9a1b2d5bb546978ac6485461"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c8370641f1a7f0e0669ddccca22f1da893cef7628396431eb445d46d893e5cd"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c8362467a0fdeccd47935f22c256bec5e6abe543bf0d66e3d3d57a8fb5731863"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:11a8c85ef4a07a7638180bf04fe189d12757c696eb41f310d2426895356dcf05"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b316144e85316da2723f9d8dc75bada12fa58489a527091fa1d5a612643d1a0e"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:cf1ea2e34868f6fbf070e1af291c8180480310173de0b0c43fc38a02929fc0e3"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e546e768d08ad55b20b11dbb78a745151acbd938f8f00d0cfbabe8b0199b9880"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:4901165d170a5fde6f589acb90a6b33629ad1ec976d4529e769c6f3d885e3e80"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:618a3d6cae6ef8ec88bb76dd80b83cfe415ad4f1d942ca2a903bf6b6ff97a2da"}, + {file = "rpds_py-0.18.0-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:ed4eb745efbff0a8e9587d22a84be94a5eb7d2d99c02dacf7bd0911713ed14dd"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:6c81e5f372cd0dc5dc4809553d34f832f60a46034a5f187756d9b90586c2c307"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:43fbac5f22e25bee1d482c97474f930a353542855f05c1161fd804c9dc74a09d"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6d7faa6f14017c0b1e69f5e2c357b998731ea75a442ab3841c0dbbbfe902d2c4"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:08231ac30a842bd04daabc4d71fddd7e6d26189406d5a69535638e4dcb88fe76"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:044a3e61a7c2dafacae99d1e722cc2d4c05280790ec5a05031b3876809d89a5c"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3f26b5bd1079acdb0c7a5645e350fe54d16b17bfc5e71f371c449383d3342e17"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:482103aed1dfe2f3b71a58eff35ba105289b8d862551ea576bd15479aba01f66"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:1374f4129f9bcca53a1bba0bb86bf78325a0374577cf7e9e4cd046b1e6f20e24"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:635dc434ff724b178cb192c70016cc0ad25a275228f749ee0daf0eddbc8183b1"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:bc362ee4e314870a70f4ae88772d72d877246537d9f8cb8f7eacf10884862432"}, + {file = "rpds_py-0.18.0-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:4832d7d380477521a8c1644bbab6588dfedea5e30a7d967b5fb75977c45fd77f"}, + {file = "rpds_py-0.18.0.tar.gz", hash = "sha256:42821446ee7a76f5d9f71f9e33a4fb2ffd724bb3e7f93386150b61a43115788d"}, +] + [[package]] name = "ruamel-yaml" version = "0.18.6" @@ -1413,4 +1590,4 @@ test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "1c9995bdb53dc27d8bd473ea4f6bdbcf1461a543ac03014ccae08bebb1462d8c" +content-hash = "cffd6f5f281a2af19da8a5834277d91f533509803d9a3afc59df73228591b6a7" diff --git a/pyproject.toml b/pyproject.toml index 07c79b5..76e1ca0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -59,6 +59,7 @@ pydantic = "^2.6.3" # bug in post 2.3 https://github.com/pydantic/pydantic/issue pydantic-core = "^2" pystac = "^1.9.0" shapely = "^2" +jsonschema = "^4.21.1" [tool.poetry.group.dev.dependencies] mypy = "^1.0.0" From 8c62744d21a7d2bb4ef8fca099af907b2814079e Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Wed, 17 Apr 2024 23:15:27 -0400 Subject: [PATCH 098/112] fix typing definitions --- .pre-commit-config.yaml | 3 + README_STAC_MODEL.md | 2 +- examples/{ => dlm-legacy}/item.json | 0 examples/{ => dlm-legacy}/item.yml | 0 pyproject.toml | 3 + stac_model/__main__.py | 12 ++-- stac_model/base.py | 59 +++++++++++------- stac_model/examples.py | 29 +++------ stac_model/input.py | 73 +++++++++++----------- stac_model/output.py | 20 +++--- stac_model/runtime.py | 20 +++--- stac_model/schema.py | 94 
++++++++++++++++++++--------- tests/conftest.py | 18 ++++-- tests/test_schema.py | 11 ++-- 14 files changed, 203 insertions(+), 141 deletions(-) rename examples/{ => dlm-legacy}/item.json (100%) rename examples/{ => dlm-legacy}/item.yml (100%) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 19283ea..3ff038d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -13,3 +13,6 @@ repos: rev: 'v0.1.12' # Use the latest version of ruff-pre-commit hooks: - id: ruff + pass_filenames: false + args: + - --config=pyproject.toml diff --git a/README_STAC_MODEL.md b/README_STAC_MODEL.md index 24aa435..b524f8a 100644 --- a/README_STAC_MODEL.md +++ b/README_STAC_MODEL.md @@ -47,7 +47,7 @@ stac-model --help stac-model ``` -This will make [this example item](./examples/item.json) for an example model. +This will make [this example item](./examples/item_basic.json) for an example model. ## :chart_with_upwards_trend: Releases diff --git a/examples/item.json b/examples/dlm-legacy/item.json similarity index 100% rename from examples/item.json rename to examples/dlm-legacy/item.json diff --git a/examples/item.yml b/examples/dlm-legacy/item.yml similarity index 100% rename from examples/item.yml rename to examples/dlm-legacy/item.yml diff --git a/pyproject.toml b/pyproject.toml index 76e1ca0..91adf41 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -98,6 +98,9 @@ exclude = [ "env", "venv" ] +respect-gitignore = true +line-length = 120 +show-fixes = true [tool.ruff.lint] select = [ diff --git a/stac_model/__main__.py b/stac_model/__main__.py index 7c2a1c3..220730c 100644 --- a/stac_model/__main__.py +++ b/stac_model/__main__.py @@ -1,15 +1,15 @@ +import json + import typer from rich.console import Console -import json + from stac_model import __version__ from stac_model.examples import eurosat_resnet +from stac_model.schema import ItemMLModelExtension app = typer.Typer( name="stac-model", - help=( - "A PydanticV2 validation and serialization library 
for the STAC Machine" - "Learning Model Extension" - ), + help="A PydanticV2 validation and serialization library for the STAC Machine Learning Model Extension", add_completion=False, ) console = Console() @@ -32,7 +32,7 @@ def main( is_eager=True, help="Prints the version of the stac-model package.", ), -) -> None: +) -> ItemMLModelExtension: """Generate example spec.""" ml_model_meta = eurosat_resnet() with open("example.json", "w") as json_file: diff --git a/stac_model/base.py b/stac_model/base.py index ec82721..4e8cc6b 100644 --- a/stac_model/base.py +++ b/stac_model/base.py @@ -1,8 +1,18 @@ from dataclasses import dataclass from enum import Enum -from typing import Any, Literal, Union, TypeAlias - -from pydantic import BaseModel, model_serializer +from typing import Any, Dict, List, Literal, TypeAlias, Union + +from pydantic import BaseModel, ConfigDict, model_serializer + +Number: TypeAlias = Union[int, float] +JSON: TypeAlias = Union[ + Dict[str, "JSON"], + List["JSON"], + Number, + bool, + str, + None, +] @dataclass @@ -20,15 +30,17 @@ class MLMBaseModel(BaseModel): ```python field: Annotated[Optional[], OmitIfNone] = None # or - field: Annotated[, OmitIfNone] = None - # or - field: Annotated[, OmitIfNone] = Field(default=None) + field: Annotated[Optional[], OmitIfNone] = Field(default=None) ``` + Since `OmitIfNone` implies that the value could be `None` (even though it would be dropped), + the `Optional` annotation must be specified to corresponding typings to avoid `mypy` lint issues. + It is important to use `MLMBaseModel`, otherwise the serializer will not be called and applied. 
Reference: https://github.com/pydantic/pydantic/discussions/5461#discussioncomment-7503283 """ + @model_serializer def model_serialize(self): omit_if_none_fields = { @@ -38,10 +50,15 @@ def model_serialize(self): } values = { self.__fields__[key].alias or key: val # use the alias if specified - for key, val in self if key not in omit_if_none_fields or val is not None + for key, val in self + if key not in omit_if_none_fields or val is not None } return values + model_config = ConfigDict( + populate_by_name=True, + ) + DataType: TypeAlias = Literal[ "uint8", @@ -59,7 +76,7 @@ def model_serialize(self): "cint32", "cfloat32", "cfloat64", - "other" + "other", ] @@ -80,19 +97,19 @@ class TaskEnum(str, Enum): ModelTaskNames: TypeAlias = Literal[ - "regression", - "classification", - "scene-classification", - "detection", - "object-detection", - "segmentation", - "semantic-segmentation", - "instance-segmentation", - "panoptic-segmentation", - "similarity-search", - "generative", - "image-captioning", - "super-resolution" + "regression", + "classification", + "scene-classification", + "detection", + "object-detection", + "segmentation", + "semantic-segmentation", + "instance-segmentation", + "panoptic-segmentation", + "similarity-search", + "generative", + "image-captioning", + "super-resolution", ] diff --git a/stac_model/examples.py b/stac_model/examples.py index 667ea38..dcde946 100644 --- a/stac_model/examples.py +++ b/stac_model/examples.py @@ -1,26 +1,20 @@ +from typing import cast + import pystac -import json import shapely from dateutil.parser import parse as parse_dt -from typing import cast - from pystac.extensions.file import FileExtension from stac_model.base import ProcessingExpression -from stac_model.input import ModelInput, InputStructure, MLMStatistic -from stac_model.output import ModelOutput, ModelResult, MLMClassification -from stac_model.schema import MLModelExtension, MLModelProperties +from stac_model.input import InputStructure, MLMStatistic, 
ModelInput +from stac_model.output import MLMClassification, ModelOutput, ModelResult +from stac_model.schema import ItemMLModelExtension, MLModelExtension, MLModelProperties -def eurosat_resnet() -> MLModelExtension[pystac.Item]: +def eurosat_resnet() -> ItemMLModelExtension: input_array = InputStructure( shape=[-1, 13, 64, 64], - dim_order=[ - "batch", - "channel", - "height", - "width" - ], + dim_order=["batch", "channel", "height", "width"], data_type="float32", ) band_names = [ @@ -82,7 +76,7 @@ def eurosat_resnet() -> MLModelExtension[pystac.Item]: statistics=stats, pre_processing_function=ProcessingExpression( format="python", - expression="torchgeo.datamodules.eurosat.EuroSATDataModule.collate_fn" + expression="torchgeo.datamodules.eurosat.EuroSATDataModule.collate_fn", ), # noqa: E501 ) result_array = ModelResult( @@ -178,15 +172,12 @@ def eurosat_resnet() -> MLModelExtension[pystac.Item]: "start_datetime": parse_dt(start_datetime).isoformat() + "Z", "end_datetime": parse_dt(end_datetime).isoformat() + "Z", "description": ( - "Sourced from torchgeo python library," - "identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO" + "Sourced from torchgeo python library," "identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO" ), }, assets=assets, ) - item.add_derived_from( - "https://earth-search.aws.element84.com/v1/collections/sentinel-2-l2a" - ) + item.add_derived_from("https://earth-search.aws.element84.com/v1/collections/sentinel-2-l2a") model_asset = cast( FileExtension[pystac.Asset], diff --git a/stac_model/input.py b/stac_model/input.py index 07b2e64..19c6e13 100644 --- a/stac_model/input.py +++ b/stac_model/input.py @@ -1,11 +1,8 @@ -from typing import Any, Annotated, List, Literal, Optional, Set, TypeAlias, Union +from typing import Annotated, List, Literal, Optional, TypeAlias, Union -from pystac.extensions.raster import Statistics -from pydantic import ConfigDict, Field, model_serializer +from pydantic import Field -from stac_model.base import DataType, 
MLMBaseModel, ProcessingExpression, OmitIfNone - -Number: TypeAlias = Union[int, float] +from stac_model.base import DataType, MLMBaseModel, Number, OmitIfNone, ProcessingExpression class InputStructure(MLMBaseModel): @@ -23,40 +20,44 @@ class MLMStatistic(MLMBaseModel): # FIXME: add 'Statistics' dep from raster ext valid_percent: Annotated[Optional[Number], OmitIfNone] = None -NormalizeType: TypeAlias = Optional[Literal[ - "min-max", - "z-score", - "l1", - "l2", - "l2sqr", - "hamming", - "hamming2", - "type-mask", - "relative", - "inf" -]] - -ResizeType: TypeAlias = Optional[Literal[ - "crop", - "pad", - "interpolation-nearest", - "interpolation-linear", - "interpolation-cubic", - "interpolation-area", - "interpolation-lanczos4", - "interpolation-max", - "wrap-fill-outliers", - "wrap-inverse-map" -]] +NormalizeType: TypeAlias = Optional[ + Literal[ + "min-max", + "z-score", + "l1", + "l2", + "l2sqr", + "hamming", + "hamming2", + "type-mask", + "relative", + "inf" + ] +] + +ResizeType: TypeAlias = Optional[ + Literal[ + "crop", + "pad", + "interpolation-nearest", + "interpolation-linear", + "interpolation-cubic", + "interpolation-area", + "interpolation-lanczos4", + "interpolation-max", + "wrap-fill-outliers", + "wrap-inverse-map", + ] +] class ModelInput(MLMBaseModel): name: str bands: List[str] # order is critical here (same index as dim shape), allow duplicate if the model needs it somehow input: InputStructure - norm_by_channel: Annotated[bool, OmitIfNone] = None - norm_type: Annotated[NormalizeType, OmitIfNone] = None - norm_clip: Annotated[List[Union[float, int]], OmitIfNone] = None - resize_type: Annotated[ResizeType, OmitIfNone] = None - statistics: Annotated[List[MLMStatistic], OmitIfNone] = None + norm_by_channel: Annotated[Optional[bool], OmitIfNone] = None + norm_type: Annotated[Optional[NormalizeType], OmitIfNone] = None + norm_clip: Annotated[Optional[List[Union[float, int]]], OmitIfNone] = None + resize_type: Annotated[Optional[ResizeType], 
OmitIfNone] = None + statistics: Annotated[Optional[List[MLMStatistic]], OmitIfNone] = None pre_processing_function: Optional[ProcessingExpression] = None diff --git a/stac_model/output.py b/stac_model/output.py index f6c6933..d734b7f 100644 --- a/stac_model/output.py +++ b/stac_model/output.py @@ -1,10 +1,9 @@ -from typing import Annotated, Any, Dict, List, Optional, Set, TypeAlias, Union -from typing_extensions import NotRequired, TypedDict +from typing import Annotated, Any, List, Optional, Set, Union, cast +from pydantic import AliasChoices, ConfigDict, Field, model_serializer from pystac.extensions.classification import Classification -from pydantic import AliasChoices, ConfigDict, Field, PlainSerializer, model_serializer -from stac_model.base import DataType, MLMBaseModel, ModelTask, ProcessingExpression, OmitIfNone +from stac_model.base import JSON, DataType, MLMBaseModel, ModelTask, OmitIfNone, ProcessingExpression class ModelResult(MLMBaseModel): @@ -33,15 +32,15 @@ class ModelResult(MLMBaseModel): class MLMClassification(MLMBaseModel, Classification): @model_serializer() - def model_dump(self, *_, **__) -> Dict[str, Any]: - return self.to_dict() + def model_dump(self, *_: Any, **__: Any) -> JSON: # type: ignore[override] + return self.to_dict() # type: ignore[call-arg] def __init__( self, value: int, description: Optional[str] = None, name: Optional[str] = None, - color_hint: Optional[str] = None + color_hint: Optional[str] = None, ) -> None: Classification.__init__(self, {}) if not name and not description: @@ -49,7 +48,7 @@ def __init__( self.apply( value=value, name=name or description, - description=description or name, + description=cast(str, description or name), color_hint=color_hint, ) @@ -64,6 +63,7 @@ def __setattr__(self, key: str, value: Any) -> None: model_config = ConfigDict(arbitrary_types_allowed=True) + # class ClassObject(BaseModel): # value: int # name: str @@ -85,10 +85,10 @@ class ModelOutput(MLMBaseModel): # We also get some 
unhashable errors with 'Set', although 'MLMClassification' implements '__hash__'. classes: Annotated[List[MLMClassification], OmitIfNone] = Field( alias="classification:classes", - validation_alias=AliasChoices("classification:classes", "classification_classes"), + validation_alias=AliasChoices("classification:classes", "classification_classes", "classes"), ) post_processing_function: Optional[ProcessingExpression] = None model_config = ConfigDict( - populate_by_name=True + populate_by_name=True, ) diff --git a/stac_model/runtime.py b/stac_model/runtime.py index 12e989b..9104fa6 100644 --- a/stac_model/runtime.py +++ b/stac_model/runtime.py @@ -1,7 +1,7 @@ from enum import Enum from typing import Annotated, Literal, Optional, Union -from pydantic import Field +from pydantic import AliasChoices, Field from stac_model.base import MLMBaseModel, OmitIfNone @@ -33,13 +33,17 @@ def __str__(self): class Runtime(MLMBaseModel): - framework: Annotated[str, OmitIfNone] = Field(default=None) - framework_version: Annotated[str, OmitIfNone] = Field(default=None) - file_size: Annotated[int, OmitIfNone] = Field(alias="file:size", default=None) - memory_size: Annotated[int, OmitIfNone] = Field(default=None) - batch_size_suggestion: Annotated[int, OmitIfNone] = Field(default=None) + framework: Annotated[Optional[str], OmitIfNone] = Field(default=None) + framework_version: Annotated[Optional[str], OmitIfNone] = Field(default=None) + file_size: Annotated[Optional[int], OmitIfNone] = Field( + alias="file:size", + validation_alias=AliasChoices("file_size", "file:size"), + default=None, + ) + memory_size: Annotated[Optional[int], OmitIfNone] = Field(default=None) + batch_size_suggestion: Annotated[Optional[int], OmitIfNone] = Field(default=None) accelerator: Optional[AcceleratorType] = Field(default=None) accelerator_constrained: bool = Field(default=False) - accelerator_summary: Annotated[str, OmitIfNone] = Field(default=None) - accelerator_count: Annotated[int, OmitIfNone] = 
Field(default=None, minimum=1) + accelerator_summary: Annotated[Optional[str], OmitIfNone] = Field(default=None) + accelerator_count: Annotated[Optional[int], OmitIfNone] = Field(default=None, minimum=1) diff --git a/stac_model/schema.py b/stac_model/schema.py index b7e4cc3..a8db2b1 100644 --- a/stac_model/schema.py +++ b/stac_model/schema.py @@ -1,7 +1,7 @@ import json from typing import ( - Any, Annotated, + Any, Generic, Iterable, List, @@ -12,16 +12,15 @@ Union, cast, get_args, + overload, ) import pystac from pydantic import ConfigDict, Field from pydantic.fields import FieldInfo -from pystac.extensions import item_assets from pystac.extensions.base import ( ExtensionManagementMixin, PropertiesExtension, - S, # generic pystac.STACObject SummariesExtension, ) @@ -31,7 +30,10 @@ from stac_model.runtime import Runtime T = TypeVar( - "T", pystac.Collection, pystac.Item, pystac.Asset, item_assets.AssetDefinition + "T", + pystac.Collection, + pystac.Item, + pystac.Asset, # item_assets.AssetDefinition, ) SchemaName = Literal["mlm"] @@ -54,21 +56,27 @@ class MLModelProperties(Runtime): pretrained: Annotated[Optional[bool], OmitIfNone] = Field(default=True) pretrained_source: Annotated[Optional[str], OmitIfNone] = None - model_config = ConfigDict( - alias_generator=mlm_prefix_adder, - populate_by_name=True, - extra="ignore" - ) + model_config = ConfigDict(alias_generator=mlm_prefix_adder, populate_by_name=True, extra="ignore") class MLModelExtension( Generic[T], PropertiesExtension, - ExtensionManagementMixin[Union[pystac.Asset, pystac.Item, pystac.Collection]], + # FIXME: resolve typing incompatibility? 
+ # 'pystac.Asset' does not derive from STACObject + # therefore, it technically cannot be used in 'ExtensionManagementMixin[T]' + # however, this makes our extension definition much easier and avoids lots of code duplication + ExtensionManagementMixin[ # type: ignore[type-var] + Union[ + pystac.Collection, + pystac.Item, + pystac.Asset, + ] + ], ): @property def name(self) -> SchemaName: - return get_args(SchemaName)[0] + return cast(SchemaName, get_args(SchemaName)[0]) def apply( self, @@ -87,8 +95,33 @@ def apply( def get_schema_uri(cls) -> str: return SCHEMA_URI + @overload + @classmethod + def ext(cls, obj: pystac.Asset, add_if_missing: bool = False) -> "AssetMLModelExtension": ... + + @overload + @classmethod + def ext(cls, obj: pystac.Item, add_if_missing: bool = False) -> "ItemMLModelExtension": ... + + @overload @classmethod - def ext(cls, obj: T, add_if_missing: bool = False) -> "MLModelExtension[T]": + def ext(cls, obj: pystac.Collection, add_if_missing: bool = False) -> "CollectionMLModelExtension": ... + + # @overload + # @classmethod + # def ext(cls, obj: item_assets.AssetDefinition, add_if_missing: bool = False) -> "ItemAssetsMLModelExtension": + # ... + + @classmethod + def ext( + cls, + obj: Union[pystac.Collection, pystac.Item, pystac.Asset], # item_assets.AssetDefinition + add_if_missing: bool = False, + ) -> Union[ + "CollectionMLModelExtension", + "ItemMLModelExtension", + "AssetMLModelExtension", + ]: """Extends the given STAC Object with properties from the :stac-ext:`Machine Learning Model Extension `. 
@@ -101,23 +134,21 @@ def ext(cls, obj: T, add_if_missing: bool = False) -> "MLModelExtension[T]": """ if isinstance(obj, pystac.Collection): cls.ensure_has_extension(obj, add_if_missing) - return cast(MLModelExtension[T], CollectionMLModelExtension(obj)) + return CollectionMLModelExtension(obj) elif isinstance(obj, pystac.Item): cls.ensure_has_extension(obj, add_if_missing) - return cast(MLModelExtension[T], ItemMLModelExtension(obj)) + return ItemMLModelExtension(obj) elif isinstance(obj, pystac.Asset): cls.ensure_owner_has_extension(obj, add_if_missing) - return cast(MLModelExtension[T], AssetMLModelExtension(obj)) - elif isinstance(obj, item_assets.AssetDefinition): - cls.ensure_owner_has_extension(obj, add_if_missing) - return cast(MLModelExtension[T], ItemAssetsMLModelExtension(obj)) + return AssetMLModelExtension(obj) + # elif isinstance(obj, item_assets.AssetDefinition): + # cls.ensure_owner_has_extension(obj, add_if_missing) + # return ItemAssetsMLModelExtension(obj) else: raise pystac.ExtensionTypeError(cls._ext_error_message(obj)) @classmethod - def summaries( - cls, obj: pystac.Collection, add_if_missing: bool = False - ) -> "SummariesMLModelExtension": + def summaries(cls, obj: pystac.Collection, add_if_missing: bool = False) -> "SummariesMLModelExtension": """Returns the extended summaries object for the given collection.""" cls.ensure_has_extension(obj, add_if_missing) return SummariesMLModelExtension(obj) @@ -136,12 +167,15 @@ def _check_mlm_property(self, prop: str) -> FieldInfo: raise AttributeError(f"Name '{prop}' is not a valid MLM property.") from err def _validate_mlm_property(self, prop: str, summaries: list[Any]) -> None: - model = MLModelProperties.model_construct() + # ignore mypy issue when combined with Annotated + # - https://github.com/pydantic/pydantic/issues/6713 + # - https://github.com/pydantic/pydantic/issues/5190 + model = MLModelProperties.model_construct() # type: ignore[call-arg] validator = 
MLModelProperties.__pydantic_validator__ for value in summaries: validator.validate_assignment(model, prop, value) - def get_mlm_property(self, prop: str) -> list[Any]: + def get_mlm_property(self, prop: str) -> Optional[list[Any]]: self._check_mlm_property(prop) return self.summaries.get_list(prop) @@ -175,13 +209,13 @@ def __repr__(self) -> str: return f"" -class ItemAssetsMLModelExtension(MLModelExtension[item_assets.AssetDefinition]): - properties: dict[str, Any] - asset_defn: item_assets.AssetDefinition - - def __init__(self, item_asset: item_assets.AssetDefinition): - self.asset_defn = item_asset - self.properties = item_asset.properties +# class ItemAssetsMLModelExtension(MLModelExtension[item_assets.AssetDefinition]): +# properties: dict[str, Any] +# asset_defn: item_assets.AssetDefinition +# +# def __init__(self, item_asset: item_assets.AssetDefinition): +# self.asset_defn = item_asset +# self.properties = item_asset.properties class AssetMLModelExtension(MLModelExtension[pystac.Asset]): diff --git a/tests/conftest.py b/tests/conftest.py index 1c51fed..0092fe0 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,27 +1,32 @@ import json import os -from typing import Any, Dict, cast +from typing import TYPE_CHECKING, Any, Dict, cast import pystac import pytest +from stac_model.base import JSON from stac_model.examples import eurosat_resnet as make_eurosat_resnet from stac_model.schema import SCHEMA_URI +if TYPE_CHECKING: + from _pytest.fixtures import SubRequest + TEST_DIR = os.path.dirname(__file__) EXAMPLES_DIR = os.path.abspath(os.path.join(TEST_DIR, "../examples")) JSON_SCHEMA_DIR = os.path.abspath(os.path.join(TEST_DIR, "../json-schema")) @pytest.fixture(scope="session") -def mlm_schema() -> Dict[str, Any]: +def mlm_schema() -> JSON: with open(os.path.join(JSON_SCHEMA_DIR, "schema.json")) as schema_file: - return json.load(schema_file) + data = json.load(schema_file) + return cast(JSON, data) @pytest.fixture(scope="session") def mlm_validator( - 
request: pytest.FixtureRequest, + request: "SubRequest", mlm_schema: Dict[str, Any], ) -> pystac.validation.stac_validator.JsonSchemaSTACValidator: """ @@ -40,9 +45,10 @@ def mlm_validator( @pytest.fixture -def mlm_example(request) -> Dict[str, Any]: +def mlm_example(request: "SubRequest") -> JSON: with open(os.path.join(EXAMPLES_DIR, request.param)) as example_file: - return json.load(example_file) + data = json.load(example_file) + return cast(JSON, data) @pytest.fixture(name="eurosat_resnet") diff --git a/tests/test_schema.py b/tests/test_schema.py index e720cc7..a3e9899 100644 --- a/tests/test_schema.py +++ b/tests/test_schema.py @@ -1,7 +1,10 @@ -from typing import Any, Dict +from typing import Any, Dict, cast + import pystac import pytest +from pystac.validation.stac_validator import STACValidator +from stac_model.base import JSON from stac_model.schema import SCHEMA_URI @@ -16,10 +19,10 @@ indirect=True, ) def test_mlm_schema( - mlm_validator: pystac.validation.STACValidator, - mlm_example, + mlm_validator: STACValidator, + mlm_example: JSON, ) -> None: - mlm_item = pystac.Item.from_dict(mlm_example) + mlm_item = pystac.Item.from_dict(cast(Dict[str, Any], mlm_example)) validated = pystac.validation.validate(mlm_item, validator=mlm_validator) assert len(validated) >= len(mlm_item.stac_extensions) # extra STAC core schemas assert SCHEMA_URI in validated From 5de66934d56470e4a8436d47bdb85262ab34ef9b Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Wed, 17 Apr 2024 23:28:14 -0400 Subject: [PATCH 099/112] fix pydantic recursion error on JSON type --- stac_model/output.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/stac_model/output.py b/stac_model/output.py index d734b7f..0e25ec7 100644 --- a/stac_model/output.py +++ b/stac_model/output.py @@ -1,9 +1,9 @@ -from typing import Annotated, Any, List, Optional, Set, Union, cast +from typing import Annotated, Any, Dict, List, Optional, Set, Union, cast from pydantic 
import AliasChoices, ConfigDict, Field, model_serializer from pystac.extensions.classification import Classification -from stac_model.base import JSON, DataType, MLMBaseModel, ModelTask, OmitIfNone, ProcessingExpression +from stac_model.base import DataType, MLMBaseModel, ModelTask, OmitIfNone, ProcessingExpression class ModelResult(MLMBaseModel): @@ -32,7 +32,7 @@ class ModelResult(MLMBaseModel): class MLMClassification(MLMBaseModel, Classification): @model_serializer() - def model_dump(self, *_: Any, **__: Any) -> JSON: # type: ignore[override] + def model_dump(self, *_: Any, **__: Any) -> Dict[str, Any]: return self.to_dict() # type: ignore[call-arg] def __init__( @@ -61,7 +61,10 @@ def __setattr__(self, key: str, value: Any) -> None: else: MLMBaseModel.__setattr__(self, key, value) - model_config = ConfigDict(arbitrary_types_allowed=True) + model_config = ConfigDict( + populate_by_name=True, + arbitrary_types_allowed=True, + ) # class ClassObject(BaseModel): From 7f7620c1f3c5808d20485d8afe0657b8c5e7f5c1 Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Wed, 17 Apr 2024 23:51:43 -0400 Subject: [PATCH 100/112] more linting fixes --- .github/workflows/build.yml | 8 ++------ .github/workflows/test.yaml | 2 +- Makefile | 2 +- README.md | 2 +- pyproject.toml | 16 ++++++++++++---- stac_model/__init__.py | 4 +++- stac_model/schema.py | 34 ++++++++++++++++++++++++---------- 7 files changed, 44 insertions(+), 24 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 8fe8d65..250a7a3 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -29,14 +29,10 @@ jobs: - name: Install dependencies run: make install-dev - - name: Run style checks + - name: Run checks run: | - make lint + make lint-all - name: Run tests run: | make test - - - name: Run safety checks - run: | - make check-safety diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 87f33a5..d44317b 100644 --- 
a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -11,4 +11,4 @@ jobs: - uses: actions/checkout@v2 - run: | npm install - npm test \ No newline at end of file + npm test diff --git a/Makefile b/Makefile index e7bc0dd..3dd26e5 100644 --- a/Makefile +++ b/Makefile @@ -91,7 +91,7 @@ format-examples: install-npm fix-%: format-%s .PHONY: lint-all -lint: test lint mypy check-safety check-markdown +lint-all: lint mypy check-safety check-markdown .PHONY: update-dev-deps update-dev-deps: diff --git a/README.md b/README.md index 9a0f58f..07261ea 100644 --- a/README.md +++ b/README.md @@ -53,7 +53,7 @@ connectors, please refer to the [STAC Model](README_STAC_MODEL.md) document. > FIXME: update examples - Examples: - - [Example with a ??? trained with torchgeo](examples/item.json) TODO update example + - **Example with a ??? trained with torchgeo** :warning: TODO update example - [Collection example](examples/collection.json): Shows the basic usage of the extension in a STAC Collection - [JSON Schema](json-schema/schema.json) TODO update - [Changelog](./CHANGELOG.md) diff --git a/pyproject.toml b/pyproject.toml index 91adf41..05160d7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -96,7 +96,8 @@ exclude = [ "build", "dist", "env", - "venv" + "venv", + "node_modules", ] respect-gitignore = true line-length = 120 @@ -159,7 +160,9 @@ warn_required_dynamic_aliases = true # https://github.com/PyCQA/pydocstyle # http://www.pydocstyle.org/en/stable/usage.html#available-options convention = "google" - +match_dir = "^(stac_model|tests)" +# ignore missing documentation, just validate provided ones +add_ignore = "D100,D101,D102,D103,D104,D105,D107,D200,D202,D204,D212,D401" [tool.pydoclint] # https://github.com/jsh9/pydoclint @@ -179,9 +182,13 @@ exclude = ''' | dist | env | venv + | node_modules )/ ''' - +# don't require type hints, since we have them in the signature instead (don't duplicate) +arg-type-hints-in-docstring = false +arg-type-hints-in-signature = true 
+check-return-types = false [tool.pytest.ini_options] # https://github.com/pytest-dev/pytest @@ -196,7 +203,8 @@ norecursedirs =[ "docs", ".tox", ".git", - "__pycache__" + "__pycache__", + "node_modules", ] doctest_optionflags = ["NUMBER", "NORMALIZE_WHITESPACE", "IGNORE_EXCEPTION_DETAIL"] timeout = 1000 diff --git a/stac_model/__init__.py b/stac_model/__init__.py index 5ed00f2..cb1e8e2 100644 --- a/stac_model/__init__.py +++ b/stac_model/__init__.py @@ -1,4 +1,6 @@ -"""A PydanticV2 validation and serialization library for the STAC ML Model Extension""" +""" +A PydanticV2/PySTAC validation and serialization library for the STAC Machine Learning Model Extension. +""" from importlib import metadata diff --git a/stac_model/schema.py b/stac_model/schema.py index a8db2b1..38c35fc 100644 --- a/stac_model/schema.py +++ b/stac_model/schema.py @@ -82,8 +82,8 @@ def apply( self, properties: Union[MLModelProperties, dict[str, Any]], ) -> None: - """Applies Machine Learning Model Extension properties to the extended - :class:`~pystac.Item` or :class:`~pystac.Asset`. + """ + Applies Machine Learning Model Extension properties to the extended :mod:`~pystac` object. """ if isinstance(properties, dict): properties = MLModelProperties(**properties) @@ -122,14 +122,19 @@ def ext( "ItemMLModelExtension", "AssetMLModelExtension", ]: - """Extends the given STAC Object with properties from the - :stac-ext:`Machine Learning Model Extension `. + """ + Extends the given STAC Object with properties from the :stac-ext:`Machine Learning Model Extension `. - This extension can be applied to instances of :class:`~pystac.Item` or - :class:`~pystac.Asset`. + This extension can be applied to instances of :class:`~pystac.Item` or :class:`~pystac.Asset`. - Raises: + Args: + obj: STAC Object to extend with the MLM extension fields. + add_if_missing: Add the MLM extension schema URI to the object if not already in `stac_extensions`. + Returns: + Extended object. 
+ + Raises: pystac.ExtensionTypeError : If an invalid object type is passed. """ if isinstance(obj, pystac.Collection): @@ -155,7 +160,10 @@ def summaries(cls, obj: pystac.Collection, add_if_missing: bool = False) -> "Sum class SummariesMLModelExtension(SummariesExtension): - """A concrete implementation of :class:`~SummariesExtension` that extends + """ + Summaries annotated with the Machine Learning Model Extension. + + A concrete implementation of :class:`~SummariesExtension` that extends the ``summaries`` field of a :class:`~pystac.Collection` to include properties defined in the :stac-ext:`Machine Learning Model `. """ @@ -192,7 +200,10 @@ def __setattr__(self, prop, value): class ItemMLModelExtension(MLModelExtension[pystac.Item]): - """A concrete implementation of :class:`MLModelExtension` on an + """ + Item annotated with the Machine Learning Model Extension. + + A concrete implementation of :class:`MLModelExtension` on an :class:`~pystac.Item` that extends the properties of the Item to include properties defined in the :stac-ext:`Machine Learning Model Extension `. @@ -219,7 +230,10 @@ def __repr__(self) -> str: class AssetMLModelExtension(MLModelExtension[pystac.Asset]): - """A concrete implementation of :class:`MLModelExtension` on an + """ + Asset annotated with the Machine Learning Model Extension. + + A concrete implementation of :class:`MLModelExtension` on an :class:`~pystac.Asset` that extends the Asset fields to include properties defined in the :stac-ext:`Machine Learning Model Extension `. 
From 1d7d17a5c9afad66e090e16fa8f90846797161c2 Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Thu, 18 Apr 2024 00:08:20 -0400 Subject: [PATCH 101/112] ignore for remark-lint --- .remarkignore | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 .remarkignore diff --git a/.remarkignore b/.remarkignore new file mode 100644 index 0000000..56d4ad9 --- /dev/null +++ b/.remarkignore @@ -0,0 +1,16 @@ +# To save time scanning +.idea/ +.vscode/ +.tox/ +.git/ +*.egg-info/ +build/ +dist/ +downloads/ +env/ + +# actual items to ignore +.pytest_cache/ +node_modules/ +docs/_build/ +docs/build/ From a1872e8b2ab1e024945059fa72dd73bd6e1948f0 Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Thu, 18 Apr 2024 00:12:35 -0400 Subject: [PATCH 102/112] add remark-lint ignore to npm scripts --- .remarkignore | 2 ++ package.json | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.remarkignore b/.remarkignore index 56d4ad9..fb1251d 100644 --- a/.remarkignore +++ b/.remarkignore @@ -3,6 +3,8 @@ .vscode/ .tox/ .git/ +.github/**/*.yaml +.github/**/*.yml *.egg-info/ build/ dist/ diff --git a/package.json b/package.json index 4c6ab38..da64d8c 100644 --- a/package.json +++ b/package.json @@ -3,8 +3,8 @@ "version": "1.0.0", "scripts": { "test": "npm run check-markdown && npm run check-examples", - "check-markdown": "remark . -f -r .github/remark.yaml", - "format-markdown": "remark . -f -r .github/remark.yaml -o", + "check-markdown": "remark . -f -r .github/remark.yaml -i .remarkignore", + "format-markdown": "remark . -f -r .github/remark.yaml -i .remarkignore -o", "check-examples": "stac-node-validator . --lint --verbose --schemaMap https://stac-extensions.github.io/template/v1.0.0/schema.json=./json-schema/schema.json", "format-examples": "stac-node-validator . 
--format --schemaMap https://stac-extensions.github.io/template/v1.0.0/schema.json=./json-schema/schema.json" }, From 7f16176def498a67a2085e0259d6bb64de59a692 Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Thu, 18 Apr 2024 00:17:54 -0400 Subject: [PATCH 103/112] downgrade remark-gfm --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index da64d8c..b73762f 100644 --- a/package.json +++ b/package.json @@ -12,7 +12,7 @@ "remark-cli": "^8.0.0", "remark-lint": "^7.0.0", "remark-lint-no-html": "^2.0.0", - "remark-gfm": "^4.0.0", + "remark-gfm": "^3.0.1", "remark-preset-lint-consistent": "^3.0.0", "remark-preset-lint-markdown-style-guide": "^3.0.0", "remark-preset-lint-recommended": "^4.0.0", From 1a5927e5abf18ca29ecd48b107f613e19bf0d5f7 Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Thu, 18 Apr 2024 00:21:15 -0400 Subject: [PATCH 104/112] drop remark-gfm causing issues --- .github/remark.yaml | 2 -- package.json | 1 - 2 files changed, 3 deletions(-) diff --git a/.github/remark.yaml b/.github/remark.yaml index a58537b..53722d7 100644 --- a/.github/remark.yaml +++ b/.github/remark.yaml @@ -14,8 +14,6 @@ plugins: - remark-lint-no-consecutive-blank-lines - - remark-lint-maximum-line-length - 120 -# GFM - autolink literals, footnotes, strikethrough, tables, tasklist - - remark-gfm # Code - remark-lint-fenced-code-flag - remark-lint-fenced-code-marker diff --git a/package.json b/package.json index b73762f..7f4abe5 100644 --- a/package.json +++ b/package.json @@ -12,7 +12,6 @@ "remark-cli": "^8.0.0", "remark-lint": "^7.0.0", "remark-lint-no-html": "^2.0.0", - "remark-gfm": "^3.0.1", "remark-preset-lint-consistent": "^3.0.0", "remark-preset-lint-markdown-style-guide": "^3.0.0", "remark-preset-lint-recommended": "^4.0.0", From a1192d328ecbe1be7b733c7e0be23361fa650725 Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Thu, 18 Apr 2024 00:26:41 -0400 Subject: [PATCH 105/112] 
update node in CI and reapply remark-gfm --- .github/remark.yaml | 2 ++ .github/workflows/test.yaml | 5 ++++- package.json | 1 + 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/.github/remark.yaml b/.github/remark.yaml index 53722d7..a58537b 100644 --- a/.github/remark.yaml +++ b/.github/remark.yaml @@ -14,6 +14,8 @@ plugins: - remark-lint-no-consecutive-blank-lines - - remark-lint-maximum-line-length - 120 +# GFM - autolink literals, footnotes, strikethrough, tables, tasklist + - remark-gfm # Code - remark-lint-fenced-code-flag - remark-lint-fenced-code-marker diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index d44317b..bddc2c4 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -5,9 +5,12 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - node-version: [14.x] + node-version: [16] steps: - uses: actions/setup-node@v1 + with: + node-version: ${{ matrix.node-version }} + cache: npm - uses: actions/checkout@v2 - run: | npm install diff --git a/package.json b/package.json index 7f4abe5..da64d8c 100644 --- a/package.json +++ b/package.json @@ -12,6 +12,7 @@ "remark-cli": "^8.0.0", "remark-lint": "^7.0.0", "remark-lint-no-html": "^2.0.0", + "remark-gfm": "^4.0.0", "remark-preset-lint-consistent": "^3.0.0", "remark-preset-lint-markdown-style-guide": "^3.0.0", "remark-preset-lint-recommended": "^4.0.0", From 43b869197d5fab6ce187295c31d71d70284a4b8b Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Thu, 18 Apr 2024 00:33:43 -0400 Subject: [PATCH 106/112] fix STAC examples linting --- examples/item_basic.json | 12 +++++++-- examples/item_eo_bands.json | 52 +++++++++++++++++++++++++++---------- examples/item_multi_io.json | 18 ++++++++++--- 3 files changed, 64 insertions(+), 18 deletions(-) diff --git a/examples/item_basic.json b/examples/item_basic.json index 0778163..a4333dd 100644 --- a/examples/item_basic.json +++ b/examples/item_basic.json @@ -90,13 +90,21 @@ "value": 0, "name": "BACKGROUND", 
"description": "Background non-city.", - "color_hint": [0, 0, 0] + "color_hint": [ + 0, + 0, + 0 + ] }, { "value": 1, "name": "CITY", "description": "A city is detected.", - "color_hint": [0, 0, 255] + "color_hint": [ + 0, + 0, + 255 + ] } ] } diff --git a/examples/item_eo_bands.json b/examples/item_eo_bands.json index 0d1414a..920fd97 100644 --- a/examples/item_eo_bands.json +++ b/examples/item_eo_bands.json @@ -506,19 +506,45 @@ ], "$comment": "Following 'eo:bands' is required to fulfil schema validation of 'eo' extension.", "eo:bands": [ - {"name": "coastal"}, - {"name": "blue"}, - {"name": "green"}, - {"name": "red"}, - {"name": "rededge1"}, - {"name": "rededge2"}, - {"name": "rededge3"}, - {"name": "nir"}, - {"name": "nir08"}, - {"name": "nir09"}, - {"name": "cirrus"}, - {"name": "swir16"}, - {"name": "swir22"} + { + "name": "coastal" + }, + { + "name": "blue" + }, + { + "name": "green" + }, + { + "name": "red" + }, + { + "name": "rededge1" + }, + { + "name": "rededge2" + }, + { + "name": "rededge3" + }, + { + "name": "nir" + }, + { + "name": "nir08" + }, + { + "name": "nir09" + }, + { + "name": "cirrus" + }, + { + "name": "swir16" + }, + { + "name": "swir22" + } ] }, "source_code": { diff --git a/examples/item_multi_io.json b/examples/item_multi_io.json index cd1b465..c5482e8 100644 --- a/examples/item_multi_io.json +++ b/examples/item_multi_io.json @@ -143,7 +143,11 @@ "value": 1, "name": "VEGETATION", "description": "pixels where vegetation was detected", - "color_hint": [0, 255, 0] + "color_hint": [ + 0, + 255, + 0 + ] } ], "post_processing_function": null @@ -169,13 +173,21 @@ "value": 0, "name": "NON_VEGETATION", "description": "background pixels", - "color_hint": [255, 255, 255] + "color_hint": [ + 255, + 255, + 255 + ] }, { "value": 1, "name": "VEGETATION", "description": "pixels where vegetation was detected", - "color_hint": [0, 0, 0] + "color_hint": [ + 0, + 0, + 0 + ] } ], "post_processing_function": { From 7d95cca4e8e854771ac2597076d388ad1b0ecc3e 
Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Thu, 18 Apr 2024 13:42:41 -0400 Subject: [PATCH 107/112] fix STAC MLM examples - remove old (invalid) DLM examples --- examples/collection.json | 49 +++-- examples/dlm-legacy/item.json | 256 ----------------------- examples/dlm-legacy/item.yml | 357 -------------------------------- examples/item_basic.json | 20 +- examples/item_eo_bands.json | 37 ++-- examples/item_multi_io.json | 25 ++- examples/item_raster_bands.json | 37 ++-- examples/model-arch-summary.txt | 155 -------------- package.json | 4 +- stac_model/examples.py | 34 ++- 10 files changed, 142 insertions(+), 832 deletions(-) delete mode 100644 examples/dlm-legacy/item.json delete mode 100644 examples/dlm-legacy/item.yml delete mode 100644 examples/model-arch-summary.txt diff --git a/examples/collection.json b/examples/collection.json index c710628..7a71b3f 100644 --- a/examples/collection.json +++ b/examples/collection.json @@ -4,46 +4,43 @@ "https://stac-extensions.github.io/item-assets/v1.0.0/schema.json" ], "type": "Collection", - "id": "EO-DL-model-catalog", - "title": "A title", - "description": "Collection that refers to a STAC Item with DLM Extension", + "id": "ml-model-examples", + "title": "Machine Learning Model examples", + "description": "Collection of items contained in the Machine Learning Model examples.", "license": "Apache-2.0", "extent": { "spatial": { "bbox": [ [ - 172.9, - 1.3, - 173, - 1.4 + -7.882190080512502, + 37.13739173208318, + 27.911651652899923, + 58.21798141355221 ] ] }, "temporal": { "interval": [ [ - "2015-06-23T00:00:00Z", - null + "1900-01-01T00:00:00Z", + "9999-12-31T23:59:59Z" ] ] } }, - "assets": { - "example": { - "href": "item.json" - } - }, "item_assets": { - "data": { + "weights": { + "title": "model weights", "roles": [ - "data" - ], + "mlm:model", + "mlm:weights" + ] } }, "summaries": { "datetime": { - "minimum": "2015-06-23T00:00:00Z", - "maximum": "2019-07-10T13:44:56Z" + "minimum": 
"1900-01-01T00:00:00Z", + "maximum": "9999-12-31T23:59:59Z" } }, "links": [ @@ -52,7 +49,19 @@ "rel": "self" }, { - "href": "item.json", + "href": "item_basic.json", + "rel": "item" + }, + { + "href": "item_eo_bands.json", + "rel": "item" + }, + { + "href": "item_raster_bands.json", + "rel": "item" + }, + { + "href": "item_multi_io.json", "rel": "item" } ] diff --git a/examples/dlm-legacy/item.json b/examples/dlm-legacy/item.json deleted file mode 100644 index 850a604..0000000 --- a/examples/dlm-legacy/item.json +++ /dev/null @@ -1,256 +0,0 @@ -{ - "stac_version": "1.0.0", - "stac_extensions": [ - "https://schemas.stacspec.org/v1.0.0-beta.3/extensions/dl-model/json-schema/schema.json", - "https://stac-extensions.github.io/eo/v1.1.0/schema.json", - "https://stac-extensions.github.io/processing/v1.1.0/schema.json", - "https://stac-extensions.github.io/scientific/v1.0.0/schema.json" - ], - "id": "dlm-resnet18-unet-scse", - "type": "Feature", - "geometry": { - "type": "Polygon", - "coordinates": [ - [ - [ - 180, - 90 - ], - [ - -180, - -90 - ], - [ - -180, - 90 - ], - [ - 180, - 90 - ], - [ - 180, - -90 - ] - ] - ] - }, - "bbox": [ - -180, - -90, - 180, - 90 - ], - "collection": "EO-DL-model-catalog", - "links": [ - { - "rel": "self", - "href": "https://landsat-stac.s3.amazonaws.com/some-eo-models/example-thelper-item.json" - }, - { - "rel": "collection", - "href": "https://landsat-stac.s3.amazonaws.com/some-eo-models/catalog.json" - } - ], - "assets": [ - { - "model_archive": { - "href": "https://drive.google.com/file/d/1PYyZVgH95454sb9LYHSfchbg8GuT__fR/view?usp=sharing", - "type": "application/zip", - "title": "model archive", - "description": "model archive as a google drive link", - "role": [ - "model artefact" - ] - } - } - ], - "properties": { - "datetime": "2016-05-03T13:22:30Z", - "title": "resnet18+unet_scse", - "description": "UNet architecture with a resnet18 backbone and a SCSE layer fine-tuned on Pleiade imagery", - "license": "MIT", - "created": 
"2020-12-12T00:00:01.000Z", - "updated": "2021-01-04T00:30:55.000Z", - "providers": [ - { - "name": "Effigis Inc.", - "roles": [ - "image licensor" - ], - "url": "https://effigis.com/en/" - }, - { - "name": "Airbus Inc.", - "roles": [ - "image provider" - ], - "url": "https://www.intelligence-airbusds.com/" - } - ], - "platform": "Pleiade", - "gsd": 0.5, - "eo:bands": [ - { - "name": "50-cm panchromatic", - "common_name": "pancro", - "center_wavelength": 400 - }, - { - "name": "blue", - "common_name": "blue", - "center_wavelength": 490 - }, - { - "name": "green", - "common_name": "green", - "center_wavelength": 500 - }, - { - "name": "red", - "common_name": "red", - "center_wavelength": 660 - }, - { - "name": "Near Infrared", - "common_name": "nir", - "center_wavelength": 850 - } - ], - "sci:publications": [ - { - "citation": "Abhijit Guha Roy and Nassir Navab and Christian Wachinger (2018). Concurrent Spatial and Channel Squeeze & Excitation in Fully Convolutional Networks, arXiv 1803.02579", - "doi": "10.1007/978-3-030-00928-1_48" - } - ], - "processing:level": "L4", - "dlm:runtime": { - "framework": "PyTorch", - "version": 1.5, - "model_handler": "thelper.cli.inference_session", - "model_src_url": "https://github.com/crim-ca/gin-model-repo", - "model_commit_hash": null, - "requirement_file": null, - "docker": { - "docker_file": "https://github.com/crim-ca/CCCOT03/blob/main/docker/thelper-geo.dockerfile", - "image_name": "thelper-geo:latest", - "tag": null, - "gpu": true, - "working_dir": "/workspace", - "run": "thelper infer --config /workspace/config.yml --save-dir /workspace --ckpt-path /workspace/ckpt.best.pth" - } - }, - "dlm:archive": [ - { - "name": "config.yml", - "role": [ - "config file" - ] - }, - { - "name": "./test_pleiade/256/input_test.tif", - "role": [ - "test set" - ] - }, - { - "name": "ckpt.best.pth", - "role": [ - "model weight" - ] - } - ], - "dlm:data": { - "process_level": "ortho", - "dtype": "uint16", - "number_of_bands": 4, - 
"useful_bands": [ - { - "index": 2, - "name": "red" - }, - { - "index": 1, - "name": "green" - }, - { - "index": 3, - "name": "nir" - } - ], - "nodata_value": 0, - "test_file": "input_test.tif", - "item_examples": [ - { - "title": "a pleiade stac item", - "url": "https://example.com/stac/pleiade/item.json" - } - ] - }, - "dlm:inputs": { - "name": "data", - "input_tensors": { - "batch": 1, - "channels": 3, - "height": 224, - "width": 224 - }, - "scaling_factor": 0.003921569, - "normalization:mean": [ - 0.245, - 0.34, - 0.67 - ], - "normalization:std": [ - 0.1, - 0.1, - 0.2 - ], - "selected_bands": [ - 0, - 1, - 3 - ], - "pre_processing_function": null - }, - "dlm:outputs": { - "task": "semantic segmentation", - "number_of_classes": 5, - "dont_care_index": 0, - "final_layer_size": [ - 1, - 5, - 64, - 64 - ], - "class_name_mapping": [ - { - "0": "dontcare" - }, - { - "1": "Bare Exposed Rock" - }, - { - "2": "High density Residential" - }, - { - "3": "Cropland" - }, - { - "4": "Mixed Forest Land" - }, - { - "5": "Lake" - } - ], - "post_processing_function": null - }, - "dlm:architecture": { - "total_nb_parameters": 42813873, - "estimated_total_size_mb": 183.72, - "type": "unet-resnet-18-scse", - "pretrained": "imagenet", - "summary": "----------------------------------------------------------------\n Layer (type) Output Shape Param\n================================================================\n Conv2d-1 [-1, 64, 32, 32] 9,408\n BatchNorm2d-2 [-1, 64, 32, 32] 128\n ReLU-3 [-1, 64, 32, 32] 0\n MaxPool2d-4 [-1, 64, 16, 16] 0\n Conv2d-5 [-1, 64, 16, 16] 36,864\n BatchNorm2d-6 [-1, 64, 16, 16] 128\n ReLU-7 [-1, 64, 16, 16] 0\n Conv2d-8 [-1, 64, 16, 16] 36,864\n BatchNorm2d-9 [-1, 64, 16, 16] 128\n ReLU-10 [-1, 64, 16, 16] 0\n BasicBlock-11 [-1, 64, 16, 16] 0\n Conv2d-12 [-1, 64, 16, 16] 36,864\n BatchNorm2d-13 [-1, 64, 16, 16] 128\n ReLU-14 [-1, 64, 16, 16] 0\n Conv2d-15 [-1, 64, 16, 16] 36,864\n BatchNorm2d-16 [-1, 64, 16, 16] 128\n ReLU-17 [-1, 64, 16, 16] 0\n 
BasicBlock-18 [-1, 64, 16, 16] 0\n Conv2d-19 [-1, 128, 8, 8] 73,728\n BatchNorm2d-20 [-1, 128, 8, 8] 256\n ReLU-21 [-1, 128, 8, 8] 0\n Conv2d-22 [-1, 128, 8, 8] 147,456\n BatchNorm2d-23 [-1, 128, 8, 8] 256\n Conv2d-24 [-1, 128, 8, 8] 8,192\n BatchNorm2d-25 [-1, 128, 8, 8] 256\n ReLU-26 [-1, 128, 8, 8] 0\n BasicBlock-27 [-1, 128, 8, 8] 0\n Conv2d-28 [-1, 128, 8, 8] 147,456\n BatchNorm2d-29 [-1, 128, 8, 8] 256\n ReLU-30 [-1, 128, 8, 8] 0\n Conv2d-31 [-1, 128, 8, 8] 147,456\n BatchNorm2d-32 [-1, 128, 8, 8] 256\n ReLU-33 [-1, 128, 8, 8] 0\n BasicBlock-34 [-1, 128, 8, 8] 0\n Conv2d-35 [-1, 256, 4, 4] 294,912\n BatchNorm2d-36 [-1, 256, 4, 4] 512\n ReLU-37 [-1, 256, 4, 4] 0\n Conv2d-38 [-1, 256, 4, 4] 589,824\n BatchNorm2d-39 [-1, 256, 4, 4] 512\n Conv2d-40 [-1, 256, 4, 4] 32,768\n BatchNorm2d-41 [-1, 256, 4, 4] 512\n ReLU-42 [-1, 256, 4, 4] 0\n BasicBlock-43 [-1, 256, 4, 4] 0\n Conv2d-44 [-1, 256, 4, 4] 589,824\n BatchNorm2d-45 [-1, 256, 4, 4] 512\n ReLU-46 [-1, 256, 4, 4] 0\n Conv2d-47 [-1, 256, 4, 4] 589,824\n BatchNorm2d-48 [-1, 256, 4, 4] 512\n ReLU-49 [-1, 256, 4, 4] 0\n BasicBlock-50 [-1, 256, 4, 4] 0\n Conv2d-51 [-1, 512, 2, 2] 1,179,648\n BatchNorm2d-52 [-1, 512, 2, 2] 1,024\n ReLU-53 [-1, 512, 2, 2] 0\n Conv2d-54 [-1, 512, 2, 2] 2,359,296\n BatchNorm2d-55 [-1, 512, 2, 2] 1,024\n Conv2d-56 [-1, 512, 2, 2] 131,072\n BatchNorm2d-57 [-1, 512, 2, 2] 1,024\n ReLU-58 [-1, 512, 2, 2] 0\n BasicBlock-59 [-1, 512, 2, 2] 0\n Conv2d-60 [-1, 512, 2, 2] 2,359,296\n BatchNorm2d-61 [-1, 512, 2, 2] 1,024\n ReLU-62 [-1, 512, 2, 2] 0\n Conv2d-63 [-1, 512, 2, 2] 2,359,296\n BatchNorm2d-64 [-1, 512, 2, 2] 1,024\n ReLU-65 [-1, 512, 2, 2] 0\n BasicBlock-66 [-1, 512, 2, 2] 0\n MaxPool2d-67 [-1, 512, 1, 1] 0\n Conv2d-68 [-1, 1024, 1, 1] 4,719,616\n BatchNorm2d-69 [-1, 1024, 1, 1] 2,048\n ReLU-70 [-1, 1024, 1, 1] 0\n_ActivatedBatchNorm-71 [-1, 1024, 1, 1] 0 AdaptiveAvgPool2d-72 [-1, 1024, 1, 1] 0\n Linear-73 [-1, 64] 65,600\n ReLU-74 [-1, 64] 0\n Linear-75 [-1, 1024] 66,560\n Conv2d-76 
[-1, 1, 1, 1] 1,024\n SCSEBlock-77 [-1, 1024, 1, 1] 0\n ConvTranspose2d-78 [-1, 512, 2, 2] 8,389,120\n DecoderUnetSCSE-79 [-1, 512, 2, 2] 0\n Conv2d-80 [-1, 1024, 2, 2] 9,438,208\n BatchNorm2d-81 [-1, 1024, 2, 2] 2,048\n ReLU-82 [-1, 1024, 2, 2] 0\n_ActivatedBatchNorm-83 [-1, 1024, 2, 2] 0 AdaptiveAvgPool2d-84 [-1, 1024, 1, 1] 0\n Linear-85 [-1, 64] 65,600\n ReLU-86 [-1, 64] 0\n Linear-87 [-1, 1024] 66,560\n Conv2d-88 [-1, 1, 2, 2] 1,024\n SCSEBlock-89 [-1, 1024, 2, 2] 0\n ConvTranspose2d-90 [-1, 256, 4, 4] 4,194,560\n DecoderUnetSCSE-91 [-1, 256, 4, 4] 0\n Conv2d-92 [-1, 512, 4, 4] 2,359,808\n BatchNorm2d-93 [-1, 512, 4, 4] 1,024\n ReLU-94 [-1, 512, 4, 4] 0\n_ActivatedBatchNorm-95 [-1, 512, 4, 4] 0 AdaptiveAvgPool2d-96 [-1, 512, 1, 1] 0\n Linear-97 [-1, 32] 16,416\n ReLU-98 [-1, 32] 0\n Linear-99 [-1, 512] 16,896\n Conv2d-100 [-1, 1, 4, 4] 512\n SCSEBlock-101 [-1, 512, 4, 4] 0\nConvTranspose2d-102 [-1, 128, 8, 8] 1,048,704 DecoderUnetSCSE-103 [-1, 128, 8, 8] 0\n Conv2d-104 [-1, 256, 8, 8] 590,080\n BatchNorm2d-105 [-1, 256, 8, 8] 512\n ReLU-106 [-1, 256, 8, 8] 0\n_ActivatedBatchNorm-107 [-1, 256, 8, 8] 0 AdaptiveAvgPool2d-108 [-1, 256, 1, 1] 0\n Linear-109 [-1, 16] 4,112\n ReLU-110 [-1, 16] 0\n Linear-111 [-1, 256] 4,352\n Conv2d-112 [-1, 1, 8, 8] 256\n SCSEBlock-113 [-1, 256, 8, 8] 0\nConvTranspose2d-114 [-1, 64, 16, 16] 262,208 DecoderUnetSCSE-115 [-1, 64, 16, 16] 0\n Conv2d-116 [-1, 128, 16, 16] 147,584\n BatchNorm2d-117 [-1, 128, 16, 16] 256\n ReLU-118 [-1, 128, 16, 16] 0\n_ActivatedBatchNorm-119 [-1, 128, 16, 16] 0 AdaptiveAvgPool2d-120 [-1, 128, 1, 1] 0\n Linear-121 [-1, 8] 1,032\n ReLU-122 [-1, 8] 0\n Linear-123 [-1, 128] 1,152\n Conv2d-124 [-1, 1, 16, 16] 128\n SCSEBlock-125 [-1, 128, 16, 16] 0\nConvTranspose2d-126 [-1, 32, 32, 32] 65,568 DecoderUnetSCSE-127 [-1, 32, 32, 32] 0\n Conv2d-128 [-1, 64, 32, 32] 55,360\n BatchNorm2d-129 [-1, 64, 32, 32] 128\n ReLU-130 [-1, 64, 32, 32] 0\n_ActivatedBatchNorm-131 [-1, 64, 32, 32] 0 AdaptiveAvgPool2d-132 [-1, 64, 
1, 1] 0\n Linear-133 [-1, 4] 260\n ReLU-134 [-1, 4] 0\n Linear-135 [-1, 64] 320\n Conv2d-136 [-1, 1, 32, 32] 64\n SCSEBlock-137 [-1, 64, 32, 32] 0\nConvTranspose2d-138 [-1, 16, 64, 64] 16,400 DecoderUnetSCSE-139 [-1, 16, 64, 64] 0\n Conv2d-140 [-1, 64, 64, 64] 31,808\n BatchNorm2d-141 [-1, 64, 64, 64] 128\n ReLU-142 [-1, 64, 64, 64] 0\n_ActivatedBatchNorm-143 [-1, 64, 64, 64] 0\n Conv2d-144 [-1, 5, 64, 64] 325\nEncoderDecoderNet-145 [-1, 5, 64, 64] 0 ================================================================ Total params= 42,813,873 Trainable params= 42,813,873 Non-trainable params= 0 ---------------------------------------------------------------- Input size (MB)= 0.05 Forward/backward pass size (MB)= 20.35 Params size (MB)= 163.32 Estimated Total Size (MB)= 183.72 ----------------------------------------------------------------" - } - } -} diff --git a/examples/dlm-legacy/item.yml b/examples/dlm-legacy/item.yml deleted file mode 100644 index 812c61e..0000000 --- a/examples/dlm-legacy/item.yml +++ /dev/null @@ -1,357 +0,0 @@ -stac_version: 1.0.0-beta.2 # schema version -stac_extensions: # stac extension required - - dl-model # deep-learning model extension - - eo # eo extension - - scientific # scientific extension required for citations - - provider # metadata about providers -id: 11234 # Some ID for this item -type: Feature # Required by STAC item -geometry: # Required by STAC item - type: Polygon - coordinates: - - - - 180.0 - - 90.0 - - - -180.0 - - -90 - - - -180.0 - - 90.0 - - - 180.0 - - 90.0 - - - 180.0 - - -90.0 -bbox: # Required by STAC item - - -180.0 - - -90.0 - - 180.0 - - 90 -collection: a eo model catalog # name of the model catalog -# -# Links and assets (part of the core specs) -# -links: - - rel: self - href: https://landsat-stac.s3.amazonaws.com/some-eo-models/example-thelper-item.json - - rel: collection - href: https://landsat-stac.s3.amazonaws.com/some-eo-models/catalog.json -assets: - - model_archive: - href: 
https://drive.google.com/file/d/1PYyZVgH95454sb9LYHSfchbg8GuT__fR/view?usp=sharing - type: application/zip - title: model archive - description: model archive as a google drive link - role: - - model artefact -properties: - # - # General properties defined in the item core schema - # - datetime: "2016-05-03T13:22:30Z" - title: resnet18+unet_scse # short name of the model - description: >- # short description - UNet architecture with a resnet18 backbone and a SCSE layer - fine-tuned on Pleiade imagery - license: MIT # license of utilisation - created: 2020-12-12T00:00:01Z - updated: 2021-01-04T00:30:55Z - providers: # optional provider information (data, etc.) - - name: Effigis Inc. - roles: - - image licensor - url: https://effigis.com/en/ - - name: Airbus Inc. - roles: - - image provider - url: https://www.intelligence-airbusds.com/ - # Section on instruments (core stac item) - platform: Pleiade - gsd: 0.50 - # eo extension fields (describe the sensor spectal bands) - eo:bands: - - name: 50-cm panchromatic - common_name: pancro - center_wavelength: 400 - - name: blue - common_name: blue - center_wavelength: 490 - - name: green - common_name: green - center_wavelength: 500 - - name: red - common_name: red - center_wavelength: 660 - - name: Near Infrared - common_name: nir - center_wavelength: 850 - # - # Scientific references - # Based on STAC scientific extension - # - sci:publications: # relevant publications - - citation: >- - Abhijit Guha Roy and Nassir Navab and Christian Wachinger (2018). 
- Concurrent Spatial and Channel Squeeze & Excitation in Fully Convolutional Networks, - arXiv 1803.02579 - # - # Runtime metadata - # Describes the runtime environment - # - dlm:runtime: - framework: PyTorch # deep learning framework used - version: 1.5 # framework version - model_handler: thelper.cli.inference_session - model_src_url: https://github.com/crim-ca/gin-model-repo - model_commit_hash: - requirement_file: - # - # Docker specifications (optional) - # - docker: - docker_file: https://github.com/crim-ca/CCCOT03/blob/main/docker/thelper-geo.dockerfile # link to the docker file - image_name: thelper-geo:latest # official image name for the inference - tag: - gpu: true # we specify if this image needs a gpu or not - working_dir: /workspace # docker instance working directory - run: thelper infer --config /workspace/config.yml --save-dir /workspace --ckpt-path /workspace/ckpt.best.pth - # - # Describe the content of the model archive - # - dlm:archive: - - name: config.yml - role: - - config file - - name: ./test_pleiade/256/input_test.tif - role: - - test set - - name: ckpt.best.pth - role: - - model weight - # - # EO data description (required) - # - dlm:data: - process_level: ortho # expected processing level - dtype: uint16 # data type (enum) - number_of_bands: 4 # number of bands in the test file - useful_bands: # describes the bands that should be loaded - - index: 2 - name: red - - index: 1 - name: green - - index: 3 - name: nir - nodata_value: 0 - test_file: input_test.tif # name of a test file in the model archive - item_examples: # points toward relevant eo items - - title: a pleiade stac item - url: https://example.com/stac/pleiade/item.json - # - # Model input description (required) - # - dlm:inputs: - name: data # expected variable name - input_tensors: - batch: 1 # batch size - channels: 3 # number of channels - height: 224 # tensor height - width: 224 # tensor width - scaling_factor: 0.003921569 # scaling factor (usually 1/255) - 
normalization:mean: # input statistical normalization (mean) - - 0.245 - - 0.34 - - 0.67 - normalization:std: # input statistical normalization (std) - - 0.1 - - 0.1 - - 0.2 - selected_bands: # selected bands (0 is the first band) - - 0 - - 1 - - 3 - pre_processing_function: - # - # Model output description (required) - # - dlm:outputs: - task: semantic segmentation # describe the ML task (enum) - number_of_classes: 5 # number of classes - dont_care_index: 0 # Index value used for the excluded segments (don't care data) - final_layer_size: # size of the output - - 1 - - 5 - - 64 - - 64 - class_name_mapping: # mapping to short class names - - 0: dontcare - - 1: Bare Exposed Rock - - 2: High density Residential - - 3: Cropland - - 4: Mixed Forest Land - - 5: Lake - post_processing_function: - # - # Describes the model architecture - # - dlm:architecture: # describe the model architecture - total_nb_parameters: 42813873 # total number of parameters - estimated_total_size_mb: 183.72 # Total memory size in MB - type: unet-resnet-18-scse # type of architecture - pretrained: imagenet - summary: >- - ---------------------------------------------------------------- - Layer (type) Output Shape Param - ================================================================ - Conv2d-1 [-1, 64, 32, 32] 9,408 - BatchNorm2d-2 [-1, 64, 32, 32] 128 - ReLU-3 [-1, 64, 32, 32] 0 - MaxPool2d-4 [-1, 64, 16, 16] 0 - Conv2d-5 [-1, 64, 16, 16] 36,864 - BatchNorm2d-6 [-1, 64, 16, 16] 128 - ReLU-7 [-1, 64, 16, 16] 0 - Conv2d-8 [-1, 64, 16, 16] 36,864 - BatchNorm2d-9 [-1, 64, 16, 16] 128 - ReLU-10 [-1, 64, 16, 16] 0 - BasicBlock-11 [-1, 64, 16, 16] 0 - Conv2d-12 [-1, 64, 16, 16] 36,864 - BatchNorm2d-13 [-1, 64, 16, 16] 128 - ReLU-14 [-1, 64, 16, 16] 0 - Conv2d-15 [-1, 64, 16, 16] 36,864 - BatchNorm2d-16 [-1, 64, 16, 16] 128 - ReLU-17 [-1, 64, 16, 16] 0 - BasicBlock-18 [-1, 64, 16, 16] 0 - Conv2d-19 [-1, 128, 8, 8] 73,728 - BatchNorm2d-20 [-1, 128, 8, 8] 256 - ReLU-21 [-1, 128, 8, 8] 0 - Conv2d-22 
[-1, 128, 8, 8] 147,456 - BatchNorm2d-23 [-1, 128, 8, 8] 256 - Conv2d-24 [-1, 128, 8, 8] 8,192 - BatchNorm2d-25 [-1, 128, 8, 8] 256 - ReLU-26 [-1, 128, 8, 8] 0 - BasicBlock-27 [-1, 128, 8, 8] 0 - Conv2d-28 [-1, 128, 8, 8] 147,456 - BatchNorm2d-29 [-1, 128, 8, 8] 256 - ReLU-30 [-1, 128, 8, 8] 0 - Conv2d-31 [-1, 128, 8, 8] 147,456 - BatchNorm2d-32 [-1, 128, 8, 8] 256 - ReLU-33 [-1, 128, 8, 8] 0 - BasicBlock-34 [-1, 128, 8, 8] 0 - Conv2d-35 [-1, 256, 4, 4] 294,912 - BatchNorm2d-36 [-1, 256, 4, 4] 512 - ReLU-37 [-1, 256, 4, 4] 0 - Conv2d-38 [-1, 256, 4, 4] 589,824 - BatchNorm2d-39 [-1, 256, 4, 4] 512 - Conv2d-40 [-1, 256, 4, 4] 32,768 - BatchNorm2d-41 [-1, 256, 4, 4] 512 - ReLU-42 [-1, 256, 4, 4] 0 - BasicBlock-43 [-1, 256, 4, 4] 0 - Conv2d-44 [-1, 256, 4, 4] 589,824 - BatchNorm2d-45 [-1, 256, 4, 4] 512 - ReLU-46 [-1, 256, 4, 4] 0 - Conv2d-47 [-1, 256, 4, 4] 589,824 - BatchNorm2d-48 [-1, 256, 4, 4] 512 - ReLU-49 [-1, 256, 4, 4] 0 - BasicBlock-50 [-1, 256, 4, 4] 0 - Conv2d-51 [-1, 512, 2, 2] 1,179,648 - BatchNorm2d-52 [-1, 512, 2, 2] 1,024 - ReLU-53 [-1, 512, 2, 2] 0 - Conv2d-54 [-1, 512, 2, 2] 2,359,296 - BatchNorm2d-55 [-1, 512, 2, 2] 1,024 - Conv2d-56 [-1, 512, 2, 2] 131,072 - BatchNorm2d-57 [-1, 512, 2, 2] 1,024 - ReLU-58 [-1, 512, 2, 2] 0 - BasicBlock-59 [-1, 512, 2, 2] 0 - Conv2d-60 [-1, 512, 2, 2] 2,359,296 - BatchNorm2d-61 [-1, 512, 2, 2] 1,024 - ReLU-62 [-1, 512, 2, 2] 0 - Conv2d-63 [-1, 512, 2, 2] 2,359,296 - BatchNorm2d-64 [-1, 512, 2, 2] 1,024 - ReLU-65 [-1, 512, 2, 2] 0 - BasicBlock-66 [-1, 512, 2, 2] 0 - MaxPool2d-67 [-1, 512, 1, 1] 0 - Conv2d-68 [-1, 1024, 1, 1] 4,719,616 - BatchNorm2d-69 [-1, 1024, 1, 1] 2,048 - ReLU-70 [-1, 1024, 1, 1] 0 - _ActivatedBatchNorm-71 [-1, 1024, 1, 1] 0 - AdaptiveAvgPool2d-72 [-1, 1024, 1, 1] 0 - Linear-73 [-1, 64] 65,600 - ReLU-74 [-1, 64] 0 - Linear-75 [-1, 1024] 66,560 - Conv2d-76 [-1, 1, 1, 1] 1,024 - SCSEBlock-77 [-1, 1024, 1, 1] 0 - ConvTranspose2d-78 [-1, 512, 2, 2] 8,389,120 - DecoderUnetSCSE-79 [-1, 512, 2, 2] 0 - 
Conv2d-80 [-1, 1024, 2, 2] 9,438,208 - BatchNorm2d-81 [-1, 1024, 2, 2] 2,048 - ReLU-82 [-1, 1024, 2, 2] 0 - _ActivatedBatchNorm-83 [-1, 1024, 2, 2] 0 - AdaptiveAvgPool2d-84 [-1, 1024, 1, 1] 0 - Linear-85 [-1, 64] 65,600 - ReLU-86 [-1, 64] 0 - Linear-87 [-1, 1024] 66,560 - Conv2d-88 [-1, 1, 2, 2] 1,024 - SCSEBlock-89 [-1, 1024, 2, 2] 0 - ConvTranspose2d-90 [-1, 256, 4, 4] 4,194,560 - DecoderUnetSCSE-91 [-1, 256, 4, 4] 0 - Conv2d-92 [-1, 512, 4, 4] 2,359,808 - BatchNorm2d-93 [-1, 512, 4, 4] 1,024 - ReLU-94 [-1, 512, 4, 4] 0 - _ActivatedBatchNorm-95 [-1, 512, 4, 4] 0 - AdaptiveAvgPool2d-96 [-1, 512, 1, 1] 0 - Linear-97 [-1, 32] 16,416 - ReLU-98 [-1, 32] 0 - Linear-99 [-1, 512] 16,896 - Conv2d-100 [-1, 1, 4, 4] 512 - SCSEBlock-101 [-1, 512, 4, 4] 0 - ConvTranspose2d-102 [-1, 128, 8, 8] 1,048,704 - DecoderUnetSCSE-103 [-1, 128, 8, 8] 0 - Conv2d-104 [-1, 256, 8, 8] 590,080 - BatchNorm2d-105 [-1, 256, 8, 8] 512 - ReLU-106 [-1, 256, 8, 8] 0 - _ActivatedBatchNorm-107 [-1, 256, 8, 8] 0 - AdaptiveAvgPool2d-108 [-1, 256, 1, 1] 0 - Linear-109 [-1, 16] 4,112 - ReLU-110 [-1, 16] 0 - Linear-111 [-1, 256] 4,352 - Conv2d-112 [-1, 1, 8, 8] 256 - SCSEBlock-113 [-1, 256, 8, 8] 0 - ConvTranspose2d-114 [-1, 64, 16, 16] 262,208 - DecoderUnetSCSE-115 [-1, 64, 16, 16] 0 - Conv2d-116 [-1, 128, 16, 16] 147,584 - BatchNorm2d-117 [-1, 128, 16, 16] 256 - ReLU-118 [-1, 128, 16, 16] 0 - _ActivatedBatchNorm-119 [-1, 128, 16, 16] 0 - AdaptiveAvgPool2d-120 [-1, 128, 1, 1] 0 - Linear-121 [-1, 8] 1,032 - ReLU-122 [-1, 8] 0 - Linear-123 [-1, 128] 1,152 - Conv2d-124 [-1, 1, 16, 16] 128 - SCSEBlock-125 [-1, 128, 16, 16] 0 - ConvTranspose2d-126 [-1, 32, 32, 32] 65,568 - DecoderUnetSCSE-127 [-1, 32, 32, 32] 0 - Conv2d-128 [-1, 64, 32, 32] 55,360 - BatchNorm2d-129 [-1, 64, 32, 32] 128 - ReLU-130 [-1, 64, 32, 32] 0 - _ActivatedBatchNorm-131 [-1, 64, 32, 32] 0 - AdaptiveAvgPool2d-132 [-1, 64, 1, 1] 0 - Linear-133 [-1, 4] 260 - ReLU-134 [-1, 4] 0 - Linear-135 [-1, 64] 320 - Conv2d-136 [-1, 1, 32, 32] 64 - 
SCSEBlock-137 [-1, 64, 32, 32] 0 - ConvTranspose2d-138 [-1, 16, 64, 64] 16,400 - DecoderUnetSCSE-139 [-1, 16, 64, 64] 0 - Conv2d-140 [-1, 64, 64, 64] 31,808 - BatchNorm2d-141 [-1, 64, 64, 64] 128 - ReLU-142 [-1, 64, 64, 64] 0 - _ActivatedBatchNorm-143 [-1, 64, 64, 64] 0 - Conv2d-144 [-1, 5, 64, 64] 325 - EncoderDecoderNet-145 [-1, 5, 64, 64] 0 - ================================================================ - Total params= 42,813,873 - Trainable params= 42,813,873 - Non-trainable params= 0 - ---------------------------------------------------------------- - Input size (MB)= 0.05 - Forward/backward pass size (MB)= 20.35 - Params size (MB)= 163.32 - Estimated Total Size (MB)= 183.72 - ---------------------------------------------------------------- diff --git a/examples/item_basic.json b/examples/item_basic.json index a4333dd..c766f10 100644 --- a/examples/item_basic.json +++ b/examples/item_basic.json @@ -5,6 +5,7 @@ ], "type": "Feature", "id": "example-model", + "collection": "ml-model-examples", "geometry": { "type": "Polygon", "coordinates": [ @@ -18,11 +19,11 @@ 58.21798141355221 ], [ - 27.911651652899925, + 27.911651652899923, 58.21798141355221 ], [ - 27.911651652899925, + 27.911651652899923, 37.13739173208318 ], [ @@ -39,6 +40,7 @@ 58.21798141355221 ], "properties": { + "description": "Basic STAC Item with only the MLM extension and no other extension cross-references.", "datetime": null, "start_datetime": "1900-01-01T00:00:00Z", "end_datetime": "9999-12-31T23:59:59Z", @@ -120,5 +122,17 @@ "mlm:model" ] } - } + }, + "links": [ + { + "rel": "collection", + "href": "./collection.json", + "type": "application/json" + }, + { + "rel": "self", + "href": "./item_basic.json", + "type": "application/geo+json" + } + ] } diff --git a/examples/item_eo_bands.json b/examples/item_eo_bands.json index 920fd97..f5831ec 100644 --- a/examples/item_eo_bands.json +++ b/examples/item_eo_bands.json @@ -9,6 +9,7 @@ ], "type": "Feature", "id": 
"resnet-18_sentinel-2_all_moco_classification", + "collection": "ml-model-examples", "geometry": { "type": "Polygon", "coordinates": [ @@ -22,11 +23,11 @@ 58.21798141355221 ], [ - 27.911651652899925, + 27.911651652899923, 58.21798141355221 ], [ - 27.911651652899925, + 27.911651652899923, 37.13739173208318 ], [ @@ -486,14 +487,6 @@ } ] }, - "links": [ - { - "rel": "derived_from", - "href": "https://earth-search.aws.element84.com/v1/collections/sentinel-2-l2a", - "type": "application/json", - "ml-aoi:split": "train" - } - ], "assets": { "weights": { "href": "https://huggingface.co/torchgeo/resnet18_sentinel2_all_moco/resolve/main/resnet18_sentinel2_all_moco-59bfdff9.pth", @@ -549,8 +542,8 @@ }, "source_code": { "href": "https://github.com/microsoft/torchgeo/blob/61efd2e2c4df7ebe3bd03002ebbaeaa3cfe9885a/torchgeo/models/resnet.py#L207", - "title": null, - "description": null, + "title": "Model implementation.", + "description": "Source code to run the model.", "type": "text/x-python", "roles": [ "mlm:model", @@ -558,5 +551,23 @@ "metadata" ] } - } + }, + "links": [ + { + "rel": "collection", + "href": "./collection.json", + "type": "application/json" + }, + { + "rel": "self", + "href": "./item_eo_bands.json", + "type": "application/geo+json" + }, + { + "rel": "derived_from", + "href": "https://earth-search.aws.element84.com/v1/collections/sentinel-2-l2a", + "type": "application/json", + "ml-aoi:split": "train" + } + ] } diff --git a/examples/item_multi_io.json b/examples/item_multi_io.json index c5482e8..3975699 100644 --- a/examples/item_multi_io.json +++ b/examples/item_multi_io.json @@ -8,6 +8,7 @@ ], "type": "Feature", "id": "resnet-18_sentinel-2_all_moco_classification", + "collection": "ml-model-examples", "geometry": { "type": "Polygon", "coordinates": [ @@ -21,11 +22,11 @@ 58.21798141355221 ], [ - 27.911651652899925, + 27.911651652899923, 58.21798141355221 ], [ - 27.911651652899925, + 27.911651652899923, 37.13739173208318 ], [ @@ -250,5 +251,23 @@ "mlm:weights" 
] } - } + }, + "links": [ + { + "rel": "collection", + "href": "./collection.json", + "type": "application/json" + }, + { + "rel": "self", + "href": "./item_multi_io.json", + "type": "application/geo+json" + }, + { + "rel": "derived_from", + "href": "https://earth-search.aws.element84.com/v1/collections/sentinel-2-l2a", + "type": "application/json", + "ml-aoi:split": "train" + } + ] } diff --git a/examples/item_raster_bands.json b/examples/item_raster_bands.json index ed1d765..4faed91 100644 --- a/examples/item_raster_bands.json +++ b/examples/item_raster_bands.json @@ -8,6 +8,7 @@ ], "type": "Feature", "id": "resnet-18_sentinel-2_all_moco_classification", + "collection": "ml-model-examples", "geometry": { "type": "Polygon", "coordinates": [ @@ -21,11 +22,11 @@ 58.21798141355221 ], [ - 27.911651652899925, + 27.911651652899923, 58.21798141355221 ], [ - 27.911651652899925, + 27.911651652899923, 37.13739173208318 ], [ @@ -337,14 +338,6 @@ } ] }, - "links": [ - { - "rel": "derived_from", - "href": "https://earth-search.aws.element84.com/v1/collections/sentinel-2-l2a", - "type": "application/json", - "ml-aoi:split": "train" - } - ], "assets": { "weights": { "href": "https://huggingface.co/torchgeo/resnet18_sentinel2_all_moco/resolve/main/resnet18_sentinel2_all_moco-59bfdff9.pth", @@ -358,8 +351,8 @@ }, "source_code": { "href": "https://github.com/microsoft/torchgeo/blob/61efd2e2c4df7ebe3bd03002ebbaeaa3cfe9885a/torchgeo/models/resnet.py#L207", - "title": null, - "description": null, + "title": "Model implementation.", + "description": "Source code to run the model.", "type": "text/x-python", "roles": [ "mlm:model", @@ -367,5 +360,23 @@ "metadata" ] } - } + }, + "links": [ + { + "rel": "collection", + "href": "./collection.json", + "type": "application/json" + }, + { + "rel": "self", + "href": "./item_raster_bands.json", + "type": "application/geo+json" + }, + { + "rel": "derived_from", + "href": "https://earth-search.aws.element84.com/v1/collections/sentinel-2-l2a", + 
"type": "application/json", + "ml-aoi:split": "train" + } + ] } diff --git a/examples/model-arch-summary.txt b/examples/model-arch-summary.txt deleted file mode 100644 index 60f0792..0000000 --- a/examples/model-arch-summary.txt +++ /dev/null @@ -1,155 +0,0 @@ - ---------------------------------------------------------------- - Layer (type) Output Shape Param - ================================================================ - Conv2d-1 [-1, 64, 32, 32] 9,408 - BatchNorm2d-2 [-1, 64, 32, 32] 128 - ReLU-3 [-1, 64, 32, 32] 0 - MaxPool2d-4 [-1, 64, 16, 16] 0 - Conv2d-5 [-1, 64, 16, 16] 36,864 - BatchNorm2d-6 [-1, 64, 16, 16] 128 - ReLU-7 [-1, 64, 16, 16] 0 - Conv2d-8 [-1, 64, 16, 16] 36,864 - BatchNorm2d-9 [-1, 64, 16, 16] 128 - ReLU-10 [-1, 64, 16, 16] 0 - BasicBlock-11 [-1, 64, 16, 16] 0 - Conv2d-12 [-1, 64, 16, 16] 36,864 - BatchNorm2d-13 [-1, 64, 16, 16] 128 - ReLU-14 [-1, 64, 16, 16] 0 - Conv2d-15 [-1, 64, 16, 16] 36,864 - BatchNorm2d-16 [-1, 64, 16, 16] 128 - ReLU-17 [-1, 64, 16, 16] 0 - BasicBlock-18 [-1, 64, 16, 16] 0 - Conv2d-19 [-1, 128, 8, 8] 73,728 - BatchNorm2d-20 [-1, 128, 8, 8] 256 - ReLU-21 [-1, 128, 8, 8] 0 - Conv2d-22 [-1, 128, 8, 8] 147,456 - BatchNorm2d-23 [-1, 128, 8, 8] 256 - Conv2d-24 [-1, 128, 8, 8] 8,192 - BatchNorm2d-25 [-1, 128, 8, 8] 256 - ReLU-26 [-1, 128, 8, 8] 0 - BasicBlock-27 [-1, 128, 8, 8] 0 - Conv2d-28 [-1, 128, 8, 8] 147,456 - BatchNorm2d-29 [-1, 128, 8, 8] 256 - ReLU-30 [-1, 128, 8, 8] 0 - Conv2d-31 [-1, 128, 8, 8] 147,456 - BatchNorm2d-32 [-1, 128, 8, 8] 256 - ReLU-33 [-1, 128, 8, 8] 0 - BasicBlock-34 [-1, 128, 8, 8] 0 - Conv2d-35 [-1, 256, 4, 4] 294,912 - BatchNorm2d-36 [-1, 256, 4, 4] 512 - ReLU-37 [-1, 256, 4, 4] 0 - Conv2d-38 [-1, 256, 4, 4] 589,824 - BatchNorm2d-39 [-1, 256, 4, 4] 512 - Conv2d-40 [-1, 256, 4, 4] 32,768 - BatchNorm2d-41 [-1, 256, 4, 4] 512 - ReLU-42 [-1, 256, 4, 4] 0 - BasicBlock-43 [-1, 256, 4, 4] 0 - Conv2d-44 [-1, 256, 4, 4] 589,824 - BatchNorm2d-45 [-1, 256, 4, 4] 512 - ReLU-46 [-1, 256, 4, 4] 0 - 
Conv2d-47 [-1, 256, 4, 4] 589,824 - BatchNorm2d-48 [-1, 256, 4, 4] 512 - ReLU-49 [-1, 256, 4, 4] 0 - BasicBlock-50 [-1, 256, 4, 4] 0 - Conv2d-51 [-1, 512, 2, 2] 1,179,648 - BatchNorm2d-52 [-1, 512, 2, 2] 1,024 - ReLU-53 [-1, 512, 2, 2] 0 - Conv2d-54 [-1, 512, 2, 2] 2,359,296 - BatchNorm2d-55 [-1, 512, 2, 2] 1,024 - Conv2d-56 [-1, 512, 2, 2] 131,072 - BatchNorm2d-57 [-1, 512, 2, 2] 1,024 - ReLU-58 [-1, 512, 2, 2] 0 - BasicBlock-59 [-1, 512, 2, 2] 0 - Conv2d-60 [-1, 512, 2, 2] 2,359,296 - BatchNorm2d-61 [-1, 512, 2, 2] 1,024 - ReLU-62 [-1, 512, 2, 2] 0 - Conv2d-63 [-1, 512, 2, 2] 2,359,296 - BatchNorm2d-64 [-1, 512, 2, 2] 1,024 - ReLU-65 [-1, 512, 2, 2] 0 - BasicBlock-66 [-1, 512, 2, 2] 0 - MaxPool2d-67 [-1, 512, 1, 1] 0 - Conv2d-68 [-1, 1024, 1, 1] 4,719,616 - BatchNorm2d-69 [-1, 1024, 1, 1] 2,048 - ReLU-70 [-1, 1024, 1, 1] 0 - _ActivatedBatchNorm-71 [-1, 1024, 1, 1] 0 - AdaptiveAvgPool2d-72 [-1, 1024, 1, 1] 0 - Linear-73 [-1, 64] 65,600 - ReLU-74 [-1, 64] 0 - Linear-75 [-1, 1024] 66,560 - Conv2d-76 [-1, 1, 1, 1] 1,024 - SCSEBlock-77 [-1, 1024, 1, 1] 0 - ConvTranspose2d-78 [-1, 512, 2, 2] 8,389,120 - DecoderUnetSCSE-79 [-1, 512, 2, 2] 0 - Conv2d-80 [-1, 1024, 2, 2] 9,438,208 - BatchNorm2d-81 [-1, 1024, 2, 2] 2,048 - ReLU-82 [-1, 1024, 2, 2] 0 - _ActivatedBatchNorm-83 [-1, 1024, 2, 2] 0 - AdaptiveAvgPool2d-84 [-1, 1024, 1, 1] 0 - Linear-85 [-1, 64] 65,600 - ReLU-86 [-1, 64] 0 - Linear-87 [-1, 1024] 66,560 - Conv2d-88 [-1, 1, 2, 2] 1,024 - SCSEBlock-89 [-1, 1024, 2, 2] 0 - ConvTranspose2d-90 [-1, 256, 4, 4] 4,194,560 - DecoderUnetSCSE-91 [-1, 256, 4, 4] 0 - Conv2d-92 [-1, 512, 4, 4] 2,359,808 - BatchNorm2d-93 [-1, 512, 4, 4] 1,024 - ReLU-94 [-1, 512, 4, 4] 0 - _ActivatedBatchNorm-95 [-1, 512, 4, 4] 0 - AdaptiveAvgPool2d-96 [-1, 512, 1, 1] 0 - Linear-97 [-1, 32] 16,416 - ReLU-98 [-1, 32] 0 - Linear-99 [-1, 512] 16,896 - Conv2d-100 [-1, 1, 4, 4] 512 - SCSEBlock-101 [-1, 512, 4, 4] 0 - ConvTranspose2d-102 [-1, 128, 8, 8] 1,048,704 - DecoderUnetSCSE-103 [-1, 128, 8, 8] 0 
- Conv2d-104 [-1, 256, 8, 8] 590,080 - BatchNorm2d-105 [-1, 256, 8, 8] 512 - ReLU-106 [-1, 256, 8, 8] 0 - _ActivatedBatchNorm-107 [-1, 256, 8, 8] 0 - AdaptiveAvgPool2d-108 [-1, 256, 1, 1] 0 - Linear-109 [-1, 16] 4,112 - ReLU-110 [-1, 16] 0 - Linear-111 [-1, 256] 4,352 - Conv2d-112 [-1, 1, 8, 8] 256 - SCSEBlock-113 [-1, 256, 8, 8] 0 - ConvTranspose2d-114 [-1, 64, 16, 16] 262,208 - DecoderUnetSCSE-115 [-1, 64, 16, 16] 0 - Conv2d-116 [-1, 128, 16, 16] 147,584 - BatchNorm2d-117 [-1, 128, 16, 16] 256 - ReLU-118 [-1, 128, 16, 16] 0 - _ActivatedBatchNorm-119 [-1, 128, 16, 16] 0 - AdaptiveAvgPool2d-120 [-1, 128, 1, 1] 0 - Linear-121 [-1, 8] 1,032 - ReLU-122 [-1, 8] 0 - Linear-123 [-1, 128] 1,152 - Conv2d-124 [-1, 1, 16, 16] 128 - SCSEBlock-125 [-1, 128, 16, 16] 0 - ConvTranspose2d-126 [-1, 32, 32, 32] 65,568 - DecoderUnetSCSE-127 [-1, 32, 32, 32] 0 - Conv2d-128 [-1, 64, 32, 32] 55,360 - BatchNorm2d-129 [-1, 64, 32, 32] 128 - ReLU-130 [-1, 64, 32, 32] 0 - ReLU-134 [-1, 4] 0 - Linear-135 [-1, 64] 320 - Conv2d-136 [-1, 1, 32, 32] 64 - SCSEBlock-137 [-1, 64, 32, 32] 0 - ConvTranspose2d-138 [-1, 16, 64, 64] 16,400 - DecoderUnetSCSE-139 [-1, 16, 64, 64] 0 - Conv2d-140 [-1, 64, 64, 64] 31,808 - BatchNorm2d-141 [-1, 64, 64, 64] 128 - ReLU-142 [-1, 64, 64, 64] 0 - _ActivatedBatchNorm-143 [-1, 64, 64, 64] 0 - Conv2d-144 [-1, 5, 64, 64] 325 - EncoderDecoderNet-145 [-1, 5, 64, 64] 0 - ================================================================ - Total params= 42,813,873 - Trainable params= 42,813,873 - Non-trainable params= 0 - ---------------------------------------------------------------- - Input size (MB)= 0.05 - Forward/backward pass size (MB)= 20.35 - Params size (MB)= 163.32 - Estimated Total Size (MB)= 183.72 - ---------------------------------------------------------------- diff --git a/package.json b/package.json index da64d8c..24e9f20 100644 --- a/package.json +++ b/package.json @@ -5,8 +5,8 @@ "test": "npm run check-markdown && npm run check-examples", 
"check-markdown": "remark . -f -r .github/remark.yaml -i .remarkignore", "format-markdown": "remark . -f -r .github/remark.yaml -i .remarkignore -o", - "check-examples": "stac-node-validator . --lint --verbose --schemaMap https://stac-extensions.github.io/template/v1.0.0/schema.json=./json-schema/schema.json", - "format-examples": "stac-node-validator . --format --schemaMap https://stac-extensions.github.io/template/v1.0.0/schema.json=./json-schema/schema.json" + "check-examples": "stac-node-validator . --lint --verbose --schemaMap https://stac-extensions.github.io/mlm/v1.0.0/schema.json=./json-schema/schema.json", + "format-examples": "stac-node-validator . --format --schemaMap https://stac-extensions.github.io/mlm/v1.0.0/schema.json=./json-schema/schema.json" }, "dependencies": { "remark-cli": "^8.0.0", diff --git a/stac_model/examples.py b/stac_model/examples.py index dcde946..df25326 100644 --- a/stac_model/examples.py +++ b/stac_model/examples.py @@ -12,7 +12,7 @@ def eurosat_resnet() -> ItemMLModelExtension: - input_array = InputStructure( + input_struct = InputStructure( shape=[-1, 13, 64, 64], dim_order=["batch", "channel", "height", "width"], data_type="float32", @@ -66,10 +66,10 @@ def eurosat_resnet() -> ItemMLModelExtension: MLMStatistic(mean=mean, stddev=stddev) for mean, stddev in zip(stats_mean, stats_stddev) ] - input = ModelInput( + model_input = ModelInput( name="13 Band Sentinel-2 Batch", bands=band_names, - input=input_array, + input=input_struct, norm_by_channel=True, norm_type="z-score", resize_type=None, @@ -79,7 +79,7 @@ def eurosat_resnet() -> ItemMLModelExtension: expression="torchgeo.datamodules.eurosat.EuroSATDataModule.collate_fn", ), # noqa: E501 ) - result_array = ModelResult( + result_struct = ModelResult( shape=[-1, 10], dim_order=["batch", "class"], data_type="float32" @@ -100,11 +100,11 @@ def eurosat_resnet() -> ItemMLModelExtension: MLMClassification(value=class_value, name=class_name) for class_name, class_value in 
class_map.items() ] - output = ModelOutput( + model_output = ModelOutput( name="classification", tasks={"classification"}, classes=class_objects, - result=result_array, + result=result_struct, post_processing_function=None, ) assets = { @@ -123,6 +123,8 @@ def eurosat_resnet() -> ItemMLModelExtension: ] ), "source_code": pystac.Asset( + title="Model implementation.", + description="Source code to run the model.", href="https://github.com/microsoft/torchgeo/blob/61efd2e2c4df7ebe3bd03002ebbaeaa3cfe9885a/torchgeo/models/resnet.py#L207", media_type="text/x-python", roles=[ @@ -147,8 +149,8 @@ def eurosat_resnet() -> ItemMLModelExtension: pretrained=True, pretrained_source="EuroSat Sentinel-2", total_parameters=11_700_000, - input=[input], - output=[output], + input=[model_input], + output=[model_output], ) # TODO, this can't be serialized but pystac.item calls for a datetime # in docs. start_datetime=datetime.strptime("1900-01-01", "%Y-%m-%d") @@ -162,9 +164,11 @@ def eurosat_resnet() -> ItemMLModelExtension: 58.21798141355221 ] geometry = shapely.geometry.Polygon.from_bounds(*bbox).__geo_interface__ - name = "_".join(ml_model_meta.name.split(" ")).lower() + item_name = "_".join(ml_model_meta.name.split(" ")).lower() + col_name = "ml-model-examples" item = pystac.Item( - id=name, + id=item_name, + collection=col_name, geometry=geometry, bbox=bbox, datetime=None, @@ -179,6 +183,16 @@ def eurosat_resnet() -> ItemMLModelExtension: ) item.add_derived_from("https://earth-search.aws.element84.com/v1/collections/sentinel-2-l2a") + # define more link references + example_catalog = pystac.Catalog( + "ml-model-examples", + "ml-model-examples", + catalog_type=pystac.CatalogType.RELATIVE_PUBLISHED, + href="https://raw.githubusercontent.com/crim-ca/dlm-extension/main/json-schema/", + ) + item.set_root(example_catalog) + item.set_self_href(f"./{item_name}") + model_asset = cast( FileExtension[pystac.Asset], pystac.extensions.file.FileExtension.ext(assets["model"], 
add_if_missing=True) From 728dcbaf2546b29c2777579905073309de6cccf9 Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Thu, 18 Apr 2024 14:16:08 -0400 Subject: [PATCH 108/112] fix STAC object self-references in python tests --- stac_model/examples.py | 43 ++++++++++++++++++++++++++++-------------- 1 file changed, 29 insertions(+), 14 deletions(-) diff --git a/stac_model/examples.py b/stac_model/examples.py index df25326..c781a5b 100644 --- a/stac_model/examples.py +++ b/stac_model/examples.py @@ -155,8 +155,10 @@ def eurosat_resnet() -> ItemMLModelExtension: # TODO, this can't be serialized but pystac.item calls for a datetime # in docs. start_datetime=datetime.strptime("1900-01-01", "%Y-%m-%d") # Is this a problem that we don't do date validation if we supply as str? - start_datetime = "1900-01-01" - end_datetime = "9999-01-01" # cannot be None, invalid against STAC Core! + start_datetime_str = "1900-01-01" + end_datetime_str = "9999-01-01" # cannot be None, invalid against STAC Core! 
+ start_datetime = parse_dt(start_datetime_str).isoformat() + "Z" + end_datetime = parse_dt(end_datetime_str).isoformat() + "Z" bbox = [ -7.882190080512502, 37.13739173208318, @@ -164,7 +166,7 @@ def eurosat_resnet() -> ItemMLModelExtension: 58.21798141355221 ] geometry = shapely.geometry.Polygon.from_bounds(*bbox).__geo_interface__ - item_name = "_".join(ml_model_meta.name.split(" ")).lower() + item_name = "item_basic" col_name = "ml-model-examples" item = pystac.Item( id=item_name, @@ -173,25 +175,38 @@ def eurosat_resnet() -> ItemMLModelExtension: bbox=bbox, datetime=None, properties={ - "start_datetime": parse_dt(start_datetime).isoformat() + "Z", - "end_datetime": parse_dt(end_datetime).isoformat() + "Z", + "start_datetime": start_datetime, + "end_datetime": end_datetime, "description": ( - "Sourced from torchgeo python library," "identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO" + "Sourced from torchgeo python library, identifier is ResNet18_Weights.SENTINEL2_ALL_MOCO" ), }, assets=assets, ) - item.add_derived_from("https://earth-search.aws.element84.com/v1/collections/sentinel-2-l2a") + + # note: cannot use 'item.add_derived_from' since it expects a 'Item' object, but we refer to a 'Collection' here + # item.add_derived_from("https://earth-search.aws.element84.com/v1/collections/sentinel-2-l2a") + item.add_link( + pystac.Link( + target="https://earth-search.aws.element84.com/v1/collections/sentinel-2-l2a", + rel=pystac.RelType.DERIVED_FROM, + media_type=pystac.MediaType.JSON, + ) + ) # define more link references - example_catalog = pystac.Catalog( - "ml-model-examples", - "ml-model-examples", - catalog_type=pystac.CatalogType.RELATIVE_PUBLISHED, - href="https://raw.githubusercontent.com/crim-ca/dlm-extension/main/json-schema/", + col = pystac.Collection( + id=col_name, + title="Machine Learning Model examples", + description="Collection of items contained in the Machine Learning Model examples.", + extent=pystac.Extent( + 
temporal=pystac.TemporalExtent([[parse_dt(start_datetime), parse_dt(end_datetime)]]), + spatial=pystac.SpatialExtent([bbox]), + ) ) - item.set_root(example_catalog) - item.set_self_href(f"./{item_name}") + col.set_self_href("./examples/collection.json") + col.add_item(item) + item.set_self_href(f"./examples/{item_name}.json") model_asset = cast( FileExtension[pystac.Asset], From ead9833f6d403012db7512d27b03b586b3498a54 Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Thu, 18 Apr 2024 14:18:39 -0400 Subject: [PATCH 109/112] add Python 3.12 to CI + rename CI to be more representative than 'build' --- .github/workflows/{build.yml => stac-model.yml} | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) rename .github/workflows/{build.yml => stac-model.yml} (87%) diff --git a/.github/workflows/build.yml b/.github/workflows/stac-model.yml similarity index 87% rename from .github/workflows/build.yml rename to .github/workflows/stac-model.yml index 250a7a3..b400632 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/stac-model.yml @@ -1,15 +1,13 @@ - -# UPDATEME to suit your project's workflow -name: build +name: Check Python Linting and Tests on: [push, pull_request] jobs: - build: + stac-model: runs-on: ubuntu-latest strategy: matrix: - python-version: ["3.10", "3.11"] + python-version: ["3.10", "3.11", "3.12"] steps: - uses: actions/checkout@v2 From 7ef029d55f27a7c04b7b84c4ad67575014ddb79f Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Thu, 18 Apr 2024 14:43:48 -0400 Subject: [PATCH 110/112] fix incorrectly interpretation of pydoclint exclude dirs --- pyproject.toml | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 05160d7..dfce856 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -168,23 +168,7 @@ add_ignore = "D100,D101,D102,D103,D104,D105,D107,D200,D202,D204,D212,D401" # https://github.com/jsh9/pydoclint # 
https://jsh9.github.io/pydoclint/how_to_config.html style = "google" -exclude = ''' -/( - \.git - | \.hg - | \.mypy_cache - | \.tox - | \.venv - | __pycache__ - | _build - | buck-out - | build - | dist - | env - | venv - | node_modules -)/ -''' +exclude = '\.git|\.hg|\.mypy_cache|\.tox|.?v?env|__pycache__|_build|buck-out|dist|node_modules' # don't require type hints, since we have them in the signature instead (don't duplicate) arg-type-hints-in-docstring = false arg-type-hints-in-signature = true From b1804fc53f325d36c37e315406f0aaf9ca6e343f Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Thu, 18 Apr 2024 14:50:10 -0400 Subject: [PATCH 111/112] remove unnecessary package with dependency flagged by safety --- pyproject.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index dfce856..4a50ef6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -73,7 +73,6 @@ pydocstyle = {extras = ["toml"], version = "^6.2.0"} pydoclint = "^0.3.0" pytest = "^7.2.1" -pytest-html = "^3.2.0" pytest-cov = "^4.1.0" pytest-mock = "^3.10.0" pytest-timeout = "^2.2.0" From f2b7dc2a590cd45795b3a5054684655d182c5449 Mon Sep 17 00:00:00 2001 From: Francis Charette-Migneault Date: Thu, 18 Apr 2024 14:50:17 -0400 Subject: [PATCH 112/112] remove unnecessary package with dependency flagged by safety --- poetry.lock | 48 ++---------------------------------------------- 1 file changed, 2 insertions(+), 46 deletions(-) diff --git a/poetry.lock b/poetry.lock index b563e0e..8c1513c 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.7.0 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. 
[[package]] name = "annotated-types" @@ -629,17 +629,6 @@ nodeenv = ">=0.11.1" pyyaml = ">=5.1" virtualenv = ">=20.10.0" -[[package]] -name = "py" -version = "1.11.0" -description = "library with cross-python path, ini-parsing, io, code, log facilities" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" -files = [ - {file = "py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"}, - {file = "py-1.11.0.tar.gz", hash = "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719"}, -] - [[package]] name = "py-cpuinfo" version = "9.0.0" @@ -911,39 +900,6 @@ pytest = ">=4.6" [package.extras] testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtualenv"] -[[package]] -name = "pytest-html" -version = "3.2.0" -description = "pytest plugin for generating HTML reports" -optional = false -python-versions = ">=3.6" -files = [ - {file = "pytest-html-3.2.0.tar.gz", hash = "sha256:c4e2f4bb0bffc437f51ad2174a8a3e71df81bbc2f6894604e604af18fbe687c3"}, - {file = "pytest_html-3.2.0-py3-none-any.whl", hash = "sha256:868c08564a68d8b2c26866f1e33178419bb35b1e127c33784a28622eb827f3f3"}, -] - -[package.dependencies] -py = ">=1.8.2" -pytest = ">=5.0,<6.0.0 || >6.0.0" -pytest-metadata = "*" - -[[package]] -name = "pytest-metadata" -version = "3.1.1" -description = "pytest plugin for test session metadata" -optional = false -python-versions = ">=3.8" -files = [ - {file = "pytest_metadata-3.1.1-py3-none-any.whl", hash = "sha256:c8e0844db684ee1c798cfa38908d20d67d0463ecb6137c72e91f418558dd5f4b"}, - {file = "pytest_metadata-3.1.1.tar.gz", hash = "sha256:d2a29b0355fbc03f168aa96d41ff88b1a3b44a3b02acbe491801c98a048017c8"}, -] - -[package.dependencies] -pytest = ">=7.0.0" - -[package.extras] -test = ["black (>=22.1.0)", "flake8 (>=4.0.1)", "pre-commit (>=2.17.0)", "tox (>=3.24.5)"] - [[package]] name = "pytest-mock" version = "3.14.0" @@ -1590,4 +1546,4 @@ test = 
["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "cffd6f5f281a2af19da8a5834277d91f533509803d9a3afc59df73228591b6a7" +content-hash = "06fb206fe0cc4eafcb151f75a9276436489a3423ca0cf59b6a2e479b51e4d934"